code | package | path | filename
---|---|---|---|
"http://laurentszyster.be/blog/async_server/"
import socket, time
try:
SOCKET_FAMILIES = (socket.AF_INET, socket.AF_UNIX)
except AttributeError: # socket.AF_UNIX is not available on this platform
SOCKET_FAMILIES = (socket.AF_INET, )
from allegra import async_loop, async_core, async_limits
class Listen (async_core.Dispatcher):
server_when = 0.0
ac_in_meter = ac_out_meter = server_dispatched = 0
def __init__ (
self, Dispatcher, addr, precision, max,
family=socket.AF_INET
):
assert (
type (precision) == int and precision > 0 and
type (max) == int and max > 0 and
family in SOCKET_FAMILIES
)
self.server_dispatchers = []
self.server_named = {}
self.Server_dispatcher = Dispatcher
self.server_precision = precision
self.create_socket (family, socket.SOCK_STREAM)
self.set_reuse_addr ()
self.bind (addr)
self.listen (max)
anonymous (self)
accept_all (self)
metered (self)
self.log ('listen %r' % (addr,), 'info')
def __repr__ (self):
return 'async-server id="%x"' % id (self)
def readable (self):
return self.accepting
def writable (self):
return False
def handle_accept (self):
try:
conn, addr = self.accept ()
except socket.error:
assert None == self.log (
'accept-bogus-socket', 'error'
)
return
#
# Medusa original comments from Sam Rushing:
#
# linux: on rare occasions we get a bogus socket back
# from accept. socketmodule.c:makesockaddr complains
# that the address family is unknown. We don't want
# the whole server to shut down because of this.
except TypeError:
assert None == self.log (
'accept-would-block', 'error'
)
return
#
# Medusa original comments from Sam Rushing:
#
# unpack non-sequence. this can happen when a read
# event fires on a listening socket, but when we call
# accept() we get EWOULDBLOCK, so dispatcher.accept()
# returns None. Seen on FreeBSD3.
name = self.server_resolved (addr)
if name != None:
try:
self.server_named[name] += 1
except KeyError:
self.server_named[name] = 1
if self.server_accepted (conn, addr, name):
self.server_accept (
conn, addr, name
)
else:
conn.close ()
return
if self.server_resolve == None:
self.server_unresolved (conn, addr)
conn.close ()
return
def resolve (name):
try:
self.server_named[name] += 1
except KeyError:
self.server_named[name] = 1
if (
name == None and
self.server_unresolved (conn, addr)
):
conn.close ()
elif self.server_accepted (conn, addr, name):
self.server_accept (
conn, addr, name
)
self.server_resolve (addr, resolve)
def handle_close (self):
"close all dispatchers, close the server and finalize it"
for dispatcher in tuple (self.server_dispatchers):
dispatcher.handle_close ()
self.server_stop (time.time ())
self.close ()
self.__dict__ = {}
#
# Breaks any circular reference through attributes, by
# clearing them all. Note that this prevents finalizations
# to be used with listeners, but anyway subclassing the
# stop and shutdown methods provides enough leverage to
# gracefully come to closure: the listener is most probably
# in the highest level of the application's instance tree.
#
# ... and sometimes damn hard to finalize ;-)
def server_unresolved (self, conn, addr):
assert None == self.log ('unresolved %r' % (addr,), 'debug')
return False # don't care!
def server_accept (self, conn, addr, name):
assert None == self.log ('accepted %r' % (addr,), 'debug')
now = time.time ()
dispatcher = self.Server_dispatcher ()
dispatcher.set_connection (conn, addr)
dispatcher.server_name = name
dispatcher.server_when = now
dispatcher.async_server = self
self.server_decorate (dispatcher, now)
if self.server_when == 0:
self.server_start (now)
self.server_dispatchers.append (dispatcher)
return dispatcher
def server_start (self, when):
"handle the client management startup"
self.server_when = when
async_loop.schedule (
when + self.server_precision, self.server_manage
)
assert None == self.log ('start', 'debug')
def server_manage (self, when):
if not self.server_dispatchers:
if self.accepting:
self.server_stop (when)
else:
self.handle_close ()
return
if self.server_limit != None:
for dispatcher in tuple (self.server_dispatchers):
if self.server_limit (dispatcher, when):
self.server_overflow (dispatcher)
return (when + self.server_precision, self.server_manage)
def server_overflow (self, dispatcher):
"assert debug log and close an overflowed dispatcher"
assert None == dispatcher.log ('limit overflow', 'debug')
dispatcher.handle_close ()
def server_meter (self, dispatcher):
"assert debug log and account I/O meters of a dispatcher"
assert None == dispatcher.log (
'in="%d" out="%d"' % (
dispatcher.ac_in_meter,
dispatcher.ac_out_meter
), 'debug'
)
self.ac_in_meter += dispatcher.ac_in_meter
self.ac_out_meter += dispatcher.ac_out_meter
self.server_dispatched += 1
def server_close (self, dispatcher):
"remove the dispatcher from list and meter dispatched"
name = dispatcher.server_name
if self.server_named[name] > 1:
self.server_named[name] -= 1
else:
del self.server_named[name]
self.server_dispatchers.remove (dispatcher)
self.server_meter (dispatcher)
dispatcher.async_server = None
def server_stop (self, when):
"handle the server scheduled or inpromptu stop"
if self.server_when:
self.log (
'stop dispatched="%d"'
' seconds="%f" in="%d" out="%d"' % (
self.server_dispatched,
(when - self.server_when),
self.ac_in_meter,
self.ac_out_meter
), 'info')
self.server_when = 0.0
self.server_dispatched = \
self.ac_in_meter = self.ac_out_meter = 0
def server_shutdown (self):
"stop accepting connections, close all current when done"
self.log ('shutdown', 'info')
if self.server_when:
self.accepting = False
for dispatcher in tuple (self.server_dispatchers):
dispatcher.close_when_done ()
else:
self.handle_close ()
return True
def anonymous (listen):
"allways resolved to the empty string"
listen.server_resolved = (lambda addr: '')
listen.server_resolve = None
return listen
def accept_all (listen):
listen.server_accepted = (lambda conn, addr, name: True)
return listen
def accept_named (listen, limit):
def accepted (conn, addr, name):
if listen.server_named[name] <= limit:
return True
if listen.server_named[name] > 1:
listen.server_named[name] -= 1
else:
del listen.server_named[name]
assert None == listen.log (
'accept-limit ip="%s"' % name,
'error'
)
return False
listen.server_accepted = accepted
return listen
def meter (dispatcher, when):
"decorate a server dispatcher with stream meters"
async_limits.meter_recv (dispatcher, when)
async_limits.meter_send (dispatcher, when)
def close ():
del (
dispatcher.recv,
dispatcher.send,
dispatcher.close
)
dispatcher.close ()
dispatcher.async_server.server_close (dispatcher)
dispatcher.close = close
def metered (listen, timeout=1<<32):
"meter I/O for server streams"
def decorate (dispatcher, when):
meter (dispatcher, when)
dispatcher.limit_inactive = timeout
listen.server_decorate = decorate
listen.server_inactive = timeout
listen.server_limit = None
return listen
def inactive (listen, timeout):
"meter I/O and limit inactivity for server streams"
assert type (timeout) == int and timeout > 0
def decorate (dispatcher, when):
meter (dispatcher, when)
dispatcher.limit_inactive = listen.server_inactive
listen.server_decorate = decorate
listen.server_inactive = timeout
listen.server_limit = async_limits.inactive
return listen
def limited (listen, timeout, inBps, outBps):
"throttle I/O and limit inactivity for managed client streams"
assert (
type (timeout) == int and timeout > 0 and
type (inBps ()) == int and inBps () > 0 and
type (outBps ()) == int and outBps () > 0
)
def throttle (dispatcher, when):
"decorate a client dispatcher with stream limits"
async_limits.meter_recv (dispatcher, when)
async_limits.meter_send (dispatcher, when)
dispatcher.limit_inactive = timeout
async_limits.throttle_readable (
dispatcher, when, listen.ac_in_throttle_Bps
)
async_limits.throttle_writable (
dispatcher, when, listen.ac_out_throttle_Bps
)
def close ():
del (
dispatcher.recv,
dispatcher.send,
dispatcher.readable,
dispatcher.writable,
dispatcher.close
)
dispatcher.close ()
dispatcher.async_server.server_close (dispatcher)
dispatcher.close = close
listen.server_decorate = throttle
listen.ac_in_throttle_Bps = inBps
listen.ac_out_throttle_Bps = outBps
listen.server_limit = async_limits.limit
return listen
def rationed (listen, timeout, inBps, outBps):
"ration I/O and limit inactivity for managed client streams"
assert (
type (timeout) == int and timeout > 0 and
type (inBps) == int and inBps > 0 and
type (outBps) == int and outBps > 0
)
listen.ac_in_ration_Bps = inBps
listen.ac_out_ration_Bps = outBps
def throttle_in ():
return int (listen.ac_in_ration_Bps / max (len (
listen.server_dispatchers
), 1))
def throttle_out ():
return int (listen.ac_out_ration_Bps / max (len (
listen.server_dispatchers
), 1))
limited (listen, timeout, throttle_in, throttle_out)
return listen
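#
# A minimal usage sketch (not part of the original module). It assumes an
# async_core.Dispatcher subclass as the per-connection handler and that
# async_loop.dispatch () is the event loop entry point; both are assumptions
# here, only Listen and its decorators above are given.
#
# from allegra import async_loop, async_core, async_server
#
# class Echo (async_core.Dispatcher):
#         def handle_read (self):
#                 self.send (self.recv (4096)) # echo received bytes back
#
# listen = async_server.Listen (
#         Echo, ('127.0.0.1', 1234), precision=10, max=5
#         )
# async_server.inactive (listen, 60) # drop peers idle for 60 seconds
# async_loop.dispatch ()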
| Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/async_server.py | async_server.py |
"http://laurentszyster.be/blog/collector/"
from allegra import async_chat, loginfo
class Null (object):
# collect data to /dev/null
collector_is_simple = True
def collect_incoming_data (self, data):
return
def found_terminator (self):
return True
DEVNULL = Null ()
class Loginfo (object):
# collect data to loginfo
collector_is_simple = True
def __init__ (self, info=None):
self.info = info
def collect_incoming_data (self, data):
loginfo.log (data, self.info)
def found_terminator (self):
return True # final!
LOGINFO = Loginfo ()
class File (object):
collector_is_simple = True
def __init__ (self, file):
self.file = file
self.collect_incoming_data = self.file.write
def found_terminator (self):
self.file.close ()
self.collect_incoming_data = None
return True
def devnull (data): pass
class Limited (object):
collector_is_simple = True
def __init__ (self, limit):
self.data = ''
self.limit = limit
def collect_incoming_data (self, data):
self.limit -= len (data)
if self.limit > 0:
self.data += data
else:
self.collect_incoming_data = devnull
#
# just don't do anything after the limit, not even
# testing for it ;-)
def found_terminator (self):
return True
class Codec_decoder (object):
# Decode collected data using the codecs' decode interface:
#
# import codecs
# Codec_decoder (collector, codecs.lookup ('zlib')[1])
#
# Note that the decode function *must* decode byte strings, not
# UNICODE strings.
collector_is_simple = True
def __init__ (self, collector, decode):
assert collector.collector_is_simple
self.collector = collector
self.decode = decode
self.buffer = ''
def collect_incoming_data (self, data):
if self.buffer:
decoded, consumed = self.decode (self.buffer + data)
consumed -= len (self.buffer)
else:
decoded, consumed = self.decode (data)
self.collector.collect_incoming_data (decoded)
if consumed < len (data) + 1:
self.buffer = data[consumed:]
def found_terminator (self):
if self.buffer:
decoded, consumed = self.decode (self.buffer)
if decoded:
self.collector.collect_incoming_data (decoded)
return self.collector.found_terminator ()
class Padded_decoder (object):
# Collect padded blocks to decode, for instance:
#
# import base64
# Padded_decoder (collector, 20, base64.b64decode)
#
# because padding does matter to the base binascii implementation,
# and is not handled by the codecs module, a shame when a large
# XML string is encoded in base64 and should be decoded and parsed
# asynchronously. Padding is also probably a requirement from block
# cypher protocols and the likes.
collector_is_simple = True
def __init__ (self, collector, padding, decode):
assert collector.collector_is_simple
self.collector = collector
self.padding = padding
self.decode = decode
self.buffer = ''
def collect_incoming_data (self, data):
lb = len (self.buffer) + len (data)
if lb < self.padding:
self.buffer += data
return
tail = lb % self.padding
if self.buffer:
if tail:
self.buffer = data[-tail:]
self.collector.collect_incoming_data (
self.decode (
self.buffer + data[:-tail]
)
)
else:
self.collector.collect_incoming_data (
self.decode (self.buffer + data)
)
elif tail:
self.buffer = data[-tail:]
self.collector.collect_incoming_data (
self.decode (data[:-tail])
)
else:
self.collector.collect_incoming_data (
self.decode (data)
)
def found_terminator (self):
if self.buffer:
self.collector.collect_incoming_data (
self.decode (self.buffer)
)
self.buffer = ''
return self.collector.found_terminator ()
class Simple (object):
collector_is_simple = True
terminator = None
buffer = ''
def get_terminator (self):
return self.terminator
def set_terminator (self, terminator):
self.terminator = terminator
def collect_incoming_data (self, data):
self.buffer = async_chat.collect_chat (
self.collector, self.buffer + data
)
def found_terminator (self):
if self.buffer:
async_chat.collect_chat (self.collector, self.buffer)
return True # always final
def bind_simple (cin, cout):
"bind to a simple collector until found_terminator is True"
def found_terminator ():
if cout.found_terminator ():
del (
cin.collect_incoming_data,
cin.found_terminator
)
return cin.found_terminator ()
return False
cin.collect_incoming_data = cout.collect_incoming_data
cin.found_terminator = found_terminator
return cin
def bind_complex (cin, cout):
"bind to a complex collector until found_terminator is True"
cout.set_terminator = cin.set_terminator
cout.get_terminator = cin.get_terminator
cout.collector = cin
cin.set_terminator (cout.get_terminator ())
cin.collect_incoming_data = cout.collect_incoming_data
def found_terminator ():
if cout.found_terminator ():
del (
cout.set_terminator,
cout.get_terminator,
cout.collector,
cin.collect_incoming_data,
cin.found_terminator
)
return cin.found_terminator ()
return False
cin.found_terminator = found_terminator
return cin
def bind (cin, cout):
"bind a complex to a collector until found_terminator is True"
assert not cin.collector_is_simple
if cout.collector_is_simple:
return bind_simple (cin, cout)
return bind_complex (cin, cout)
def simplify (cin, cout):
couple = Simple ()
bind_complex (couple, cout)
return bind_simple (cin, couple)
def simple (collected):
return bind_simple (Simple (), collected)
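#
# A minimal usage sketch (not part of the original module): decode a
# zlib-compressed byte string into a Limited buffer, following the
# Codec_decoder comment above. The variable compressed_bytes is a
# placeholder for data collected from a channel.
#
# import codecs
# from allegra import collector
#
# limited = collector.Limited (16384)
# decoder = collector.Codec_decoder (limited, codecs.lookup ('zlib')[1])
# decoder.collect_incoming_data (compressed_bytes)
# decoder.found_terminator ()
# # limited.data now holds up to 16KB of decoded output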
| Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/collector.py | collector.py |
# Copyright (C) 2005 Laurent A.V. Szyster
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"http://laurentszyster.be/blog/select_trigger/"
import sys, os, socket, thread
from allegra import (
netstring, prompt, loginfo,
async_loop, finalization, async_core
)
class Trigger (async_core.Dispatcher):
"Thunk back safely from threads into the asynchronous loop"
def __repr__ (self):
return 'trigger id="%x"' % id (self)
def readable (self):
"a Trigger is allways readable"
return True
def writable (self):
"a Trigger is never writable"
return False
def handle_connect (self):
"pass on connect"
pass
def handle_read (self):
"try to call all thunked, log all exceptions' traceback"
try:
self.recv (8192)
except socket.error:
return
self.lock.acquire ()
try:
thunks = self.thunks
self.thunks = []
finally:
self.lock.release ()
for thunk in thunks:
try:
thunk[0] (*thunk[1])
except:
self.loginfo_traceback ()
if os.name == 'posix':
def posix_trigger_init (self):
"use a POSIX pipe to connect a pair of file descriptors"
self.select_triggers = 0
fd, self.trigger = os.pipe ()
self.set_file (fd)
self.lock = thread.allocate_lock ()
self.thunks = []
assert None == self.log ('open', 'debug')
def posix_trigger_pull (self, thunk):
"acquire the trigger's lock, thunk and pull"
self.lock.acquire ()
try:
self.thunks.append (thunk)
finally:
self.lock.release ()
os.write (self.trigger, 'x')
def posix_trigger_close (self):
"close the trigger"
async_core.Dispatcher.close (self)
os.close (self.trigger)
Trigger.__init__ = posix_trigger_init
Trigger.__call__ = posix_trigger_pull
Trigger.close = posix_trigger_close
elif os.name == 'nt':
def win32_trigger_init (self):
"get a pair of Win32 connected sockets"
self.select_triggers = 0
a = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
w = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
a.bind (('127.9.9.9', 19999))
a.listen (1)
w.setblocking (0)
try:
w.connect (('127.9.9.9', 19999))
except socket.error:
pass
conn, addr = a.accept ()
a.close ()
w.setblocking (1)
self.trigger = w
self.set_connection (conn, addr)
self.lock = thread.allocate_lock ()
self.thunks = []
assert None == self.log ('open', 'debug')
def win32_trigger_pull (self, thunk):
"acquire the trigger's lock, thunk and pull"
self.lock.acquire ()
try:
self.thunks.append (thunk)
finally:
self.lock.release ()
self.trigger.send ('x')
def win32_trigger_close (self):
"close the trigger"
async_core.Dispatcher.close (self)
self.trigger.close ()
Trigger.__init__ = win32_trigger_init
Trigger.__call__ = win32_trigger_pull
Trigger.close = win32_trigger_close
else:
raise ImportError ('OS "%s" not supported, sorry :-(' % os.name)
class Select_trigger (loginfo.Loginfo, finalization.Finalization):
"""A base class that implements the select_trigger interface
select_trigger ((function, args))
to thunk function and method calls from one thread into the main
asynchronous loop. Select_trigger implements thread-safe and
practical loginfo interfaces:
select_trigger_log (data, info=None)
to log information, and
select_trigger_traceback ()
to log traceback asynchronously from a distinct thread."""
select_trigger = None
def __init__ (self):
"maybe open a new Trigger, increase its reference count"
if self.select_trigger == None:
Select_trigger.select_trigger = Trigger ()
self.select_trigger.select_triggers += 1
def __repr__ (self):
return 'select-trigger id="%x"' % id (self)
def select_trigger_log (self, data, info=None):
"log asynchronously via the select trigger"
self.select_trigger ((self.log, (data, info)))
def select_trigger_traceback (self):
"return a compact traceback tuple and log asynchronously"
ctb = prompt.compact_traceback ()
self.select_trigger ((
self.loginfo_log,
loginfo.traceback_encode (ctb)
))
return ctb
def finalization (self, finalized):
"decrease the Trigger's reference count, maybe close it"
trigger = self.select_trigger
trigger.select_triggers -= 1
if trigger.select_triggers == 0:
trigger ((trigger.handle_close, ()))
Select_trigger.select_trigger = None
self.select_trigger = None
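#
# A minimal usage sketch (not part of the original module): thunk a log
# call from a worker thread back into the asynchronous loop. The
# select_trigger_log call is taken from the class above; treating
# async_loop.dispatch () as the event loop entry point is an assumption.
#
# import thread
# from allegra import async_loop, select_trigger
#
# logger = select_trigger.Select_trigger ()
#
# def worker ():
#         # runs in a separate thread; the log call is queued, then the
#         # trigger wakes the loop which executes it asynchronously
#         logger.select_trigger_log ('hello from a thread', 'info')
#
# thread.start_new_thread (worker, ())
# async_loop.dispatch ()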
| Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/select_trigger.py | select_trigger.py |
"http://laurentszyster.be/blog/timeouts/"
import time, collections
from allegra import async_loop
class Timeouts (object):
def __init__ (self, period, precision=None):
# self.timeouts_timeout = timeout
self.timeouts_period = max (period, async_loop.precision)
self.timeouts_precision = precision or async_loop.precision
self.timeouts_deque = collections.deque ()
def timeouts_push (self, reference):
when = time.time ()
if not self.timeouts_deque:
self.timeouts_start (when)
self.timeouts_deque.append ((when, reference))
return reference
def timeouts_start (self, when):
async_loop.schedule (
when + self.timeouts_precision, self.timeouts_poll
)
def timeouts_poll (self, now):
then = now - self.timeouts_precision - self.timeouts_period
while self.timeouts_deque:
when, reference = self.timeouts_deque[0]
if when < then:
self.timeouts_deque.popleft ()
self.timeouts_timeout (reference)
else:
break
if self.timeouts_deque:
return (
now + self.timeouts_precision,
self.timeouts_poll
)
self.timeouts_stop ()
def timeouts_stop (self):
pass # self.timeouts_timeout = None
# The first, simplest and probably most interesting application of Timeouts
def cached (cache, timeout, precision):
def timedout (reference):
try:
del cache[reference]
except KeyError:
pass
t = Timeouts (timeout, precision)
t.timeouts_timeout = timedout
def push (key, value):
cache[key] = value
t.timeouts_push (key)
return push
# cache = {}
# push = timeouts.cached (cache, 60, 6)
# ...
# push (key, value)
# Note about this implementation
#
# Time out
#
# In order to scale up and handle very large numbers of timeouts scheduled
# asynchronously at fixed intervals, this module provides a simple deque
# used as a FIFO of timeout events to poll from.
#
# Polling a timeouts queue should be scheduled recurrently at more or less
# precise intervals, depending on the volume expected and the time it takes
# to handle each timeout. Your mileage may vary, but this design scales up
# well for long intervals, when each poll finds that only the first few
# events at the left of the deque have timed out.
#
# The timeouts interface is applied by pns_udp to manage the 3-second timeout
# set on each statement relayed or routed by a PNS/UDP circle. There might
# be other applications, RTP protocols for instance.
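#
# A minimal sketch (not part of the original module) of the semantics
# described above, driving timeouts_poll by hand instead of letting
# async_loop.schedule recur it; the names below are illustrative only.
#
# import time
# from allegra import timeouts
#
# def timedout (reference):
#         print 'timed out: %r' % reference
#
# t = timeouts.Timeouts (period=3, precision=1)
# t.timeouts_timeout = timedout
# t.timeouts_push ('some-reference')
# # ... at least period + precision seconds later ...
# t.timeouts_poll (time.time ()) # pops and times out expired references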
| Allegra | /Allegra-0.63.zip/Allegra-0.63/lib/timeouts.py | timeouts.py |
import os
import configparser
import logging
import signal
import subprocess
from asyncio import get_event_loop
from sanic import Sanic
from sanic.response import json
from sanic.config import Config
from sanic.response import text
from .controller import BaseView
class Allegro(object):
def __init__(self, name):
self.log = logging.getLogger('allegro')
formatter = logging.Formatter(
"%(asctime)s: %(levelname)s: %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.setLevel(logging.INFO)
self.app = Sanic(name)
def initialize(self, config_path):
try:
self.cf = configparser.ConfigParser()
self.cf.read(config_path)
self.host = self.cf["basic"]["bind_host"]
self.port = int(self.cf["basic"]["bind_port"])
self.root_path = self.cf["basic"]["root_path"]
self.api_worker = int(self.cf["basic"]["api_worker"])
self.pid_path = self.cf["basic"]["pid_path"]
self.timeout = int(self.cf["basic"]["timeout"])
self.app.config.REQUEST_TIMEOUT=self.timeout
except Exception as e:
self.log.exception("Error ocurred when trying to the config file Info: %s" % str(e))
raise
def init_route(self):
services = self.cf["service"]["keys"].replace(' ', '').split(',')
for service in services:
uri = self.cf[service]["uri"]
module = self.cf[service]["module"]
file_upload = eval(self.cf[service]["file_upload_enabled"])
files_save_path = ""
if file_upload:
files_save_path = self.cf[service]["files_save_path"]
method = self.cf[service]["method"].lower().replace(' ','').split(',')
self.app.add_route(BaseView.as_view(method, module, self.root_path, self.timeout, file_upload, files_save_path), uri)
def start(self):
try:
self.init_route()
with open(self.pid_path, "a") as f:
f.write(str(os.getpid())+"\n")
services = self.cf["service"]["keys"].split(',')
# Change to the root path so the Celery workers can import the service modules
os.chdir(self.root_path)
for service in services:
module = self.cf[service]["module"]
eventlet_enabled = eval(self.cf[service]['eventlet_enabled'])
if eventlet_enabled:
eventlet_pool = int(self.cf[service]["eventlet_pool"])
subprocess.call('celery worker -A %s --concurrency 1 -l info -P eventlet -c %s -n %s &' % (module, eventlet_pool, module), shell=True)
else:
workers = int(self.cf[service]["workers"])
for i in range(workers):
subprocess.call('celery worker -A %s --concurrency %s -l info -n %s%s &' % (module, workers, module, i), shell=True)
self.log.info("Starting Consumer service...")
self.app.add_task(self.save_pid())
self.app.run(host=self.host, port=self.port, workers=self.api_worker)
except Exception as e:
self.log.exception("Error ocurred when trying to the config file Info: %s" % str(e))
raise
async def save_pid(self):
with open(self.pid_path, "a") as f:
f.write(str(os.getpid())+"\n")
def stop(self):
try:
self.log.info("Staring to termina the processes...")
with open(self.pid_path, "r") as f:
processes = f.readlines()
for p in processes:
try:
a = os.kill(int(p[:-1]), signal.SIGKILL)
except Exception as e:
self.log.error(e)
open(self.pid_path, "w+")
os.system("pkill -9 -f 'celery worker'")
self.log.info("All the processes are terminated.")
except Exception as e:
self.log.exception("Error occurred when trying to kill the processes. Info: %s" % str(e))
raise
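# A minimal usage sketch (not part of this module). The section and key
# names below are the ones read by initialize() and init_route(); the
# values, paths and the 'demo' service name are illustrative only.
#
# from allegro.app import Allegro
#
# app = Allegro('demo')
# app.initialize('/etc/allegro/allegro.conf')
# app.start()
#
# where /etc/allegro/allegro.conf could look like:
#
# [basic]
# bind_host = 0.0.0.0
# bind_port = 8000
# root_path = /opt/allegro/services
# api_worker = 2
# pid_path = /tmp/allegro.pid
# timeout = 60
#
# [service]
# keys = demo
#
# [demo]
# uri = /demo
# module = demo_tasks
# method = GET, POST
# file_upload_enabled = False
# eventlet_enabled = False
# workers = 1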
| Allegro | /Allegro-1.2.2.tar.gz/Allegro-1.2.2/allegro/app.py | app.py |
import time
import imp
import asyncio
import aiofiles
from sanic.views import HTTPMethodView
from sanic.response import text
from sanic.response import json
from sanic.exceptions import ServerError
class BaseView(HTTPMethodView):
def __init__(self, method, module, path, timeout, file_upload, file_save_path):
self.method = method
self.file_upload = file_upload
self.file_save_path = file_save_path
try:
file, pathname, desc = imp.find_module(module,[path])
self.moduleobj = imp.load_module(module, file, pathname, desc)
except Exception as e:
raise
def request_to_message(self, request):
message = dict()
try:
message["url_content"] = request.args
message["form_content"] = request.form
message["json_content"] = request.json
except Exception:
pass
return message
def return_check(self, response):
if isinstance(response, dict):
return json(response)
else:
raise ServerError("The `dict` is expected. Please check the type of the callback", status_code=401)
async def get(self, request):
if 'get' not in self.method:
raise ServerError("Method not supported", status_code=400)
message = self.request_to_message(request)
handler = "self.moduleobj.get.delay"
callback = eval(handler)(message)
while(not callback.ready()):
await asyncio.sleep(1)
response = callback.result
return self.return_check(response)
async def post(self, request):
if 'post' not in self.method:
raise ServerError("Method not supported", status_code=400)
if self.file_upload:
try:
file = request.files.get('file')
if not file:
raise ServerError('No file found', status_code=400)
async with aiofiles.open(self.file_save_path + file.name, 'wb+') as f:
await f.write(file.body)
return self.return_check({"info":"Upload file successfully", "state":1})
except Exception as e:
print(e)
return self.return_check({"info": str(e), "state": 0})
else:
message = self.request_to_message(request)
if self.file_upload:
message = request
handler = "self.moduleobj.post.delay"
callback = eval(handler)(message)
while(not callback.ready()):
await asyncio.sleep(1)
response = callback.result
return self.return_check(response)
async def put(self, request):
if 'put' not in self.method:
raise ServerError("Method not supported", status_code=400)
message = self.request_to_message(request)
handler = "self.moduleobj.put.delay"
callback = eval(handler)(message)
while(not callback.ready()):
await asyncio.sleep(1)
response = callback.result
return self.return_check(response)
async def patch(self, request):
if 'patch' not in self.method:
raise ServerError("Method not supported", status_code=400)
message = self.request_to_message(request)
handler = "self.moduleobj.patch.delay"
callback = eval(handler)(message)
while(not callback.ready()):
await asyncio.sleep(1)
response = callback.result
return self.return_check(response)
async def delete(self, request):
if 'delete' not in self.method:
raise ServerError("Method not supported", status_code=400)
message = self.request_to_message(request)
handler = "self.moduleobj.delete.delay"
callback = eval(handler)(message)
while(not callback.ready()):
await asyncio.sleep(1)
response = callback.result
return self.return_check(response)
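# A minimal sketch (not part of this module) of the kind of task module that
# BaseView loads with imp.find_module/imp.load_module. It must expose Celery
# tasks named after the HTTP methods it serves, each returning a dict (as
# required by return_check). The module name and broker/backend URLs are
# illustrative only.
#
# # demo_tasks.py
# from celery import Celery
#
# app = Celery('demo_tasks',
#              broker='redis://localhost:6379/0',
#              backend='redis://localhost:6379/0')
#
# @app.task
# def get(message):
#     # 'message' is the dict built by BaseView.request_to_message()
#     return {'echo': message.get('url_content'), 'state': 1}
#
# @app.task
# def post(message):
#     return {'received': True, 'state': 1}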
| Allegro | /Allegro-1.2.2.tar.gz/Allegro-1.2.2/allegro/controller.py | controller.py |
# AlleleFinder
[](https://circleci.com/gh/OLC-LOC-Bioinformatics/AzureStorage/tree/main)
[](https://codecov.io/gh/OLC-Bioinformatics/AlleleFinder)
[](https://anaconda.org/olcbioinformatics/allelefinder)
[](https://badge.fury.io/gh/olc-bioinformatics%2Fallelefinder)
[](https://github.com/OLC-LOC-Bioinformatics/AzureStorage/issues)
[](https://OLC-Bioinformatics.github.io/AlleleFinder/?badge=stable)
[](https://github.com/OLC-Bioinformatics/AlleleFinder/blob/main/LICENSE)
### STEC AlleleFinder
A suite of tools, written in Python, designed for the discovery, sequence typing, and profiling of _stx_ alleles in Shiga toxin-producing _Escherichia coli_ (STEC)
## Scripts
There is a single STEC script with six separate functionalities:
1. [`profile_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/profile_reduce)
2. [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce)
3. [`allele_find`](https://olc-bioinformatics.github.io/AlleleFinder/allele_find)
4. [`aa_allele_find`](https://olc-bioinformatics.github.io/AlleleFinder/aa_allele_find)
5. [`allele_split`](https://olc-bioinformatics.github.io/AlleleFinder/allele_split)
6. [`allele_concatenate`](https://olc-bioinformatics.github.io/AlleleFinder/allele_concatenate)
Full documentation is available at the [AlleleFinder GitHub pages site](https://olc-bioinformatics.github.io/AlleleFinder/)
## Quick Start
[`Conda`](https://docs.conda.io/en/latest/) is required to install AlleleFinder. See the [documentation](http://bioconda.github.io/) or [AlleleFinder installation](https://olc-bioinformatics.github.io/AlleleFinder/install/) for instructions on installing conda on your system
Create a new conda environment:
```
conda create -n allele_finder -c olcbioinformatics allelefinder=0.1.2=py_0
```
Additional documentation is available [here](https://olc-bioinformatics.github.io/AlleleFinder/installation)
## Reduce profiles
This script reduces a full wgMLST profile from Enterobase to the genes of interest.
The two _stx_ genes, _stx1_ and _stx2_, have the following identifiers in Enterobase:
* stx1 subunit A: **ECs2974**
* stx1 subunit B: **ECs2973**
* stx2 subunit A: **ECs1205**
* stx2 subunit B: **ECs1206**
#### Inputs
In order to extract all the unique profiles from a full Enterobase wgMLST profile for both _stx1_ subunits, create a text
file containing the two identifiers (one per row) e.g.:
`genes.txt`
```
ECs2974
ECs2973
```
A full _Escherichia_ wgMLST profile file from Enterobase is also required
#### Running the script
```
stec.py profile_reduce -p /path/to/profile_file -g /path/to/genes_file -o /path/to/output_folder
```
Additional information regarding this functionality is available in the [`profile_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/profile_reduce) documentation.
## Translate and reduce alleles
This script translates allele files from Enterobase in nucleotide format to amino acid, performs content and length checks, and removes duplicates.
In order for a translated allele to pass content and length checks, it must:
1. Start with a `Methionine` residue
2. Pass a minimum length threshold after trimming:
* The length thresholds are:
* ECs2973: 90 amino acid residues
* ECs2974: 316 amino acid residues
* ECs1205: 320 amino acid residues
* ECs1206: 88 amino acid residues
3. Not be a duplicate of an allele already in the reduced database (a rough sketch of these checks is shown below)
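For illustration only, these checks roughly correspond to the following sketch (using Biopython; the length thresholds are the ones listed above, while the function and variable names are hypothetical and not part of AlleleFinder):
```
from Bio.Seq import Seq

# Minimum trimmed lengths per gene, as listed above
MIN_LENGTHS = {'ECs2973': 90, 'ECs2974': 316, 'ECs1205': 320, 'ECs1206': 88}

def passes_checks(gene, nt_sequence, seen_aa_sequences):
    """Translate a nucleotide allele and apply the content, length, and duplicate checks."""
    # Translate and trim everything after the first stop codon
    aa_sequence = str(Seq(nt_sequence).translate()).split('*')[0]
    if not aa_sequence.startswith('M'):        # must start with a Methionine residue
        return False
    if len(aa_sequence) < MIN_LENGTHS[gene]:   # minimum length threshold after trimming
        return False
    if aa_sequence in seen_aa_sequences:       # must not duplicate an existing allele
        return False
    seen_aa_sequences.add(aa_sequence)
    return True
```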
#### Inputs
1. nucleotide allele files from Enterobase in FASTA format.
2. reduced profile file prepared by [`profile_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/profile_reduce). Note that the allele files must contain sequence for the same genes that were used for the reduction of the profile, e.g.:
* ECs2973
* ECs2974
#### Running the script
```
stec.py allele_translate_reduce -a /path/to/allele_folder -p /path/to/profile_file -r /path/to/output/folder/aa_profile -t /path/to/output/folder/aa_alleles
```
Additional information regarding this functionality is available in the [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce) documentation
## Find alleles
This script performs BLAST analyses on a nucleotide allele database prepared by [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce) against nucleotide query sequences to discover their sequence types, and updates the nucleotide and amino acid profiles and allele databases.
#### Inputs
1. nucleotide query files in FASTA format
2. all outputs from [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce)
#### Running the script
```
stec.py allele_find --nt_profile /path/to/nt_profile_file --aa_profile /path/to_aa_profile_file --nt_alleles /path/to/nt_allele_folder --aa_alleles /path/to/aa_allele_folder -r /path/to/output_folder -q /path/to/query_folder
```
Additional information regarding this functionality is available in the [`allele_find`](https://olc-bioinformatics.github.io/AlleleFinder/allele_find) documentation
## Find alleles from amino query files
This script performs BLAST analyses on an amino acid database prepared by [`allele_find`](https://olc-bioinformatics.github.io/AlleleFinder/allele_find) against amino acid query sequences to find matching alleles. Updates allele database.
#### Inputs
1. amino acid query files in FASTA format. One query allele per file. Note that the allele naming scheme must match the outputs from the previous scripts
2. amino acid allele database prepared by [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce) or [`allele_find`](https://olc-bioinformatics.github.io/AlleleFinder/allele_find)
#### Running the script
```
stec.py aa_allele_find --aa_alleles /path/to/aa_allele_folder -r /path/to/output_folder -q /path/to/query_folder
```
Additional information regarding this functionality is available in the [`aa_allele_find`](https://olc-bioinformatics.github.io/AlleleFinder/aa_allele_find) documentation
## Split allele database
This script splits a single allele database file into multiple files; one sequence per file
#### Inputs
1. allele database file
#### Running the script
```
stec.py allele_split -q /path/to/query_folder -o /path/to_output_folder
```
Additional information regarding this functionality is available in the [`allele_split`](https://olc-bioinformatics.github.io/AlleleFinder/allele_split) documentation
## Concatenate allele database
This script concatenates alleles of the _stx_ A and B subunits into a single sequence with a linker
#### Inputs
1. nucleotide and amino acid allele files prepared by [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce)
2. nucleotide and amino acid profile files prepared by [`allele_translate_reduce`](https://olc-bioinformatics.github.io/AlleleFinder/allele_translate_reduce). Note that the allele files must contain sequence for the same genes that were used for the reduction of the profile
#### Running the script
```
stec.py allele_concatenate --nt_profile /path/to/nt_profile/profile.txt --aa_profile /path/to/aa_profile/profile.txt --nt_alleles /path/to/nt_alleles --aa_alleles /path/to/aa_alleles -c /path/to/outputs
```
## Feedback
If you encounter any issues installing or running AlleleFinder, have feature requests, or need assistance, please [open an issue on GitHub](https://github.com/OLC-Bioinformatics/AlleleFinder/issues/new/choose)
## License
MIT License
Copyright (c) Government of Canada 2023
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| AlleleFinder | /AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/README.md | README.md |
# Standard imports
from csv import DictReader
from glob import glob
import logging
import json
import os
# Third party imports
from Bio.Seq import Seq
from Bio import SeqIO
# Local imports
from allele_tools.methods import \
extract_novel_alleles
from olctools.accessoryFunctions.accessoryFunctions import combinetargets
from genemethods.geneseekr.geneseekr import GeneSeekr
__author__ = 'adamkoziol'
def allele_prep(allele_path, gene_names, combined_targets, amino_acid):
"""
Create a 'combinedtargets.fasta' file
:param allele_path: Name and absolute path to folder containing allele database files
:param gene_names: List of all gene names in the analysis
:param combined_targets: String of absolute path to the combinedtargets.fasta file
:param amino_acid: Boolean of whether the query sequences are amino acid or nucleotide
:return: records: Dictionary of allele_header:allele_sequence
:return: gene_names: List of all gene names extracted from allele headers
:return: data: String of header including all gene names. To be used in creating final report
"""
logging.info('Reading allele files')
custom_targets = os.path.join(allele_path, 'custom.tfa')
combined_targets_db = glob(os.path.join(allele_path, 'combinedtargets*'))
records = {}
# Clear out any previously created combinedtargets files (and associated BLAST databases)
clear_alleles(combined_targets_db=combined_targets_db,
custom_targets=custom_targets)
alleles = glob(os.path.join(allele_path, '*.*fa*'))
# If the dictionary hasn't already been populated by a previous iteration, remove the path and
# the extension from each of the allele files to determine gene names
if not gene_names:
for allele in alleles:
gene_names.append(os.path.splitext(os.path.basename(allele))[0].replace('_alleles', ''))
# Populate the header string for the final report
genes = '\t'.join(sorted(gene_names))
data = f'ST\t{genes}\n'
# Create the combinedtargets file
if not os.path.isfile(combined_targets):
if amino_acid:
combinetargets(
targets=alleles,
targetpath=allele_path,
mol_type='prot'
)
else:
combinetargets(
targets=alleles,
targetpath=allele_path
)
# Create BLAST databases
GeneSeekr.makeblastdb(
fasta=combined_targets,
program='blastn' if not amino_acid else 'blastp'
)
# Load all the FASTA records from the combinedtargets file
for allele_file in alleles:
for record in SeqIO.parse(allele_file, 'fasta'):
records[record.id] = str(record.seq)
return records, gene_names, data
def clear_alleles(combined_targets_db, custom_targets):
"""
Remove any combinedtargets.fasta or custom.tfa files present in the allele path
:param combined_targets_db: List of combinedtargets files, including BLAST database files
:param custom_targets: Name and absolute path to custom.tfa target file
:return:
"""
# Iterate through all the combinedtargets files
for combined_target in combined_targets_db:
# Remove each file
os.remove(combined_target)
# Attempt to remove the targets.tfa file
try:
os.remove(custom_targets)
except FileNotFoundError:
pass
def read_profile(profile_file):
"""
Load any previously created profiles for this analysis
:param profile_file: Name and absolute path to a profile file
"""
# Initialise the dictionary
profile_data = {}
# Only load the profile file if it exists
if os.path.isfile(profile_file):
logging.info('Extracting profiles from profile file')
# Open an Excel-formatted sequence profile file as a dictionary
profile = DictReader(open(profile_file, encoding='utf-8'), dialect='excel-tab')
# Iterate through the rows
for row in profile:
# Populate the profile dictionary with profile number: {gene: allele}.
allele_comprehension = {gene: allele for gene, allele in row.items() if gene != 'ST'}
# Extract the sequence type number from the first field name
seq_type = row[profile.fieldnames[0]]
# Update the profile data dictionary
profile_data[seq_type] = allele_comprehension
return profile_data
def parseable_blast_outputs(runmetadata, fieldnames, extended_fieldnames, records, cutoff=90):
"""
Add a header to the BLAST report, so that it is easier to figure out what is in each column
:param runmetadata: Metadata object containing a list of all metadata objects
:param fieldnames: String of all the field names in the BLAST report
:param extended_fieldnames: String of the BLAST field names plus the calculated percent identity
:param records: Dictionary of allele name: allele sequence
:param cutoff: Integer of the minimum percent identity between query and subject sequence. Default is 90
"""
logging.info('Adding headers to BLAST outputs')
for sample in runmetadata.samples:
data = []
# Load the first line of the report
with open(sample.alleles.blast_report, 'r', encoding='utf-8') as report:
header_line = report.readline().strip()
# Split the header on tabs
header_list = header_line.split('\t')
# Check to see if the header has already been added. Skip this step if it has been added.
if header_list[0] != fieldnames[0]:
with open(sample.alleles.blast_report, 'r', encoding='utf-8') as report:
header = list(report.readline().split('\t'))
if len(header) == len(fieldnames):
current_fieldnames = fieldnames
else:
current_fieldnames = extended_fieldnames
blastdict = DictReader(open(sample.alleles.blast_report, encoding='utf-8'),
fieldnames=current_fieldnames,
dialect='excel-tab'
)
# Go through each BLAST result
for row in blastdict:
# Calculate the percent identity and extract the bit score from the row
# Percent identity is calculated here as (number of identical matches -
# gaps) / length of the subject allele sequence * 100
percent_identity = float(
'{:0.2f}'.format((float(row['identical']) - float(row['gaps'])) /
len(records[row['subject_id']]) * 100)
)
# Filter the results based on the cutoff value
if percent_identity < cutoff:
continue
# Create a percent match entry based on the calculated percent identity match
row['percent_match'] = str(percent_identity)
# Add the updated row to the list
data.append(row)
# Overwrite the original BLAST outputs to include headers, and the percent match
with open(sample.alleles.blast_report, 'w', encoding='utf-8') as updated_report:
# Add the header
headers = '\t'.join(extended_fieldnames)
updated_report.write(f'{headers}\n')
# Add the results
for row in data:
for header in extended_fieldnames:
# Write the value from the row with the header as the key
try:
updated_report.write('{value}\t'.format(value=row[header]))
except KeyError:
# noinspection PyTypeChecker
updated_report.write('{value}\t'.format(value=''.join(row[None])))
# Add a newline for each result
updated_report.write('\n')
def parse_results(runmetadata, fieldnames, extended_fieldnames, amino_acid, genome_query=False):
"""
Parse the BLAST results, and populate GenObjects
:param runmetadata: Metadata object containing list of all metadata objects
:param fieldnames: String of all the field names in the BLAST report
:param extended_fieldnames: String of the BLAST field names plus the calculated percent identity
:param amino_acid: Variable on whether targets are protein
:param genome_query: Boolean of whether the query is a genome, and the subject is the allele
:return: Updated runmetadata
"""
logging.info('Parsing BLAST outputs')
for sample in runmetadata.samples:
# Initialise GenObjects as required
sample.alleles.blastlist = []
sample.alleles.targetsequence = {}
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(sample.alleles.blast_report, encoding='utf-8'),
fieldnames=extended_fieldnames,
dialect='excel-tab')
resultdict = {}
# Go through each BLAST result
for row in blastdict:
# Ignore the headers
if row['query_id'].startswith(fieldnames[0]):
pass
else:
target_id = row['query_id'] if not genome_query else row['subject_id']
target_start = row['subject_start']
target_end = row['subject_end']
target_seq = row['query_sequence']
# Remove unwanted pipes added to the name
target = target_id.lstrip('gb|').rstrip('|') if '|' in target_id else \
target_id
# If the percent identity is a perfect match (100%)
if float(row['percent_match']) == 100:
# Append the hit dictionary to the list
sample.alleles.blastlist.append(row)
# Update the dictionary with the target and percent identity
resultdict.update({target: row['percent_match']})
# Determine if the orientation of the sequence is reversed compared to
# the reference sequence
if int(target_end) < int(target_start) and not amino_acid:
# Create a sequence object using BioPython
seq = Seq(target_seq)
# Calculate the reverse complement of the sequence
querysequence = str(seq.reverse_complement())
# If the sequence is not reversed, use the sequence as it is in the output
else:
querysequence = target_seq
# Add the sequence in the correct orientation to the sample
try:
sample.alleles.targetsequence[target].append(querysequence)
except (AttributeError, KeyError):
sample.alleles.targetsequence[target] = []
sample.alleles.targetsequence[target].append(querysequence)
# Add the percent identity to the object
sample.alleles.blastresults = resultdict
# Populate missing results with 'NA' values
if len(resultdict) == 0:
sample.alleles.blastresults = 'NA'
return runmetadata
def profile_alleles(runmetadata, profile_dict, profile_set, records, amino_acid=False,
novel_alleles=False, genome_query=False, allele_path=None, report_path=None,
cutoff=75):
"""
Create the gene:allele profile from the BLAST outputs from each sample
:param runmetadata: Metadata object containing a list of all metadata objects
:param profile_dict: Dictionary to store gene:allele profile for each sample
:param profile_set: List of all unique profiles
:param records: List of all gene names
:param novel_alleles: Boolean of whether novel alleles should be extracted from BLAST hit if
there is no 100% match
:param genome_query: Boolean of whether the query is a genome, and the subject is the allele
:param amino_acid: Variable on whether targets are protein
:param allele_path: Name and absolute path to folder containing allele database files
:param report_path: Name and absolute path to folder in which reports are to be created
:param cutoff: Integer of the minimum percent identity between query and subject sequence. Default is 75
:return: Updated profile_dict and profile_set
"""
logging.info('Determining allele profiles')
# Iterate through all the samples
for sample in runmetadata.samples:
# Initialise a dictionary to store the profile information for each samples
profile_dict[sample.name] = {}
# Initialise a dictionary to store the gene:allele combinations for each sample
allele_comprehension = {}
# Each gene in the analysis is stored in the list of genes created in allele_finder
for gene in records:
# Create a variable to track whether the current gene is present in the current sample
present = False
# Iterate through all the BLAST outputs for the sample
for allele in sample.alleles.blastresults:
# If the gene name is present as a substring of the allele e.g. adk in adk_1,
# then the gene is present in the BLAST outputs
if gene in allele:
# Strip off the allele number from the allele e.g. adk_1 yields 1
allele_id = allele.split('_')[-1]
# Update the dictionary with the new gene: allele number for the sample
allele_comprehension.update({gene: allele_id})
# Update the gene presence variable
present = True
# If, after iterating through all the BLAST outputs, the gene is not present in the
# sample, update the gene: allele to reflect this absence
if not present:
if novel_alleles:
sample, novel_allele, query_sequence = extract_novel_alleles(
sample=sample,
gene=gene,
genome_query=genome_query,
amino_acid=amino_acid,
allele_path=allele_path,
report_path=report_path,
cutoff=cutoff,
)
try:
sample.alleles.targetsequence[novel_allele].append(query_sequence)
except KeyError:
sample.alleles.targetsequence[novel_allele] = [query_sequence]
if novel_allele:
allele_comprehension.update({gene: novel_allele.split('_')[-1]})
else:
allele_comprehension.update({gene: '0'})
else:
# Set missing alleles to '0'
allele_comprehension.update({gene: '0'})
# In order to hash the dictionary, use JSON, with sorted keys to freeze it
frozen_allele_comprehension = json.dumps(allele_comprehension, sort_keys=True)
# Update the dictionary of profiles with the hash of the frozen dictionary: list of
# samples with that hash
if hash(frozen_allele_comprehension) not in profile_dict:
profile_dict[hash(frozen_allele_comprehension)] = [sample.name]
else:
profile_dict[hash(frozen_allele_comprehension)].append(sample.name)
# Add the 'regular' dictionary to the list of all profiles as required
if allele_comprehension not in profile_set:
profile_set.append(allele_comprehension)
return profile_dict, profile_set
def match_profile(profile_data, profile_dict, profile_matches):
"""
Match current profiles to any previously created profiles
:param profile_data: Dictionary of seq_type: {gene name:allele ID}
:param profile_dict: Dictionary of gene:allele profile for each sample
:param profile_matches: Dictionary of seq_type: matching profile
:return: profile_matches: Updated dictionary of seq_type: matching profiles
"""
# If the profile_data dictionary was not populated in the read_profiles methods,
# there is nothing to match
if profile_data:
logging.info('Matching new profiles against profile file')
# Extract the sequence type and allele dictionary from the profile file
for seq_type, allele_comprehension in profile_data.items():
# Freeze the allele comprehension as above
frozen_allele_comprehension = json.dumps(allele_comprehension, sort_keys=True)
try:
# Extract the samples that match this profile
matches = profile_dict[hash(frozen_allele_comprehension)]
# Update the dictionary with the matching samples
profile_matches[seq_type] = matches
# The profile will not necessarily match any of the profiles found in the analysis
except KeyError:
pass
return profile_matches
def create_profile(profile_data, profile_set, new_profiles, profile_dict, profile_matches):
"""
Create new profiles for novel profiles as required
:param profile_data: Dictionary of seq_type: {gene name:allele ID}
:param profile_set: List of all unique profiles
:param new_profiles: List of novel profiles
:param profile_dict: Dictionary of gene:allele profile for each sample
:param profile_matches: Dictionary of seq_type: matching profile
:return: profile_matches: Updated dictionary of seq_type: matching profiles
:return: profile_data: Updated dictionary of seq_type: {gene name: allele ID}
:return: new_profiles: Updated list of novel profiles
"""
# Initialise the sequence type to be 1
seq_type = 1
# If the profile_data dictionary exists, set the sequence type to be the last of the entries
# in the dictionary plus one, as that corresponds to the next sequence type
if profile_data:
# seq_type = len(profile_data) + 1
seq_type = sorted(int(st) for st in profile_data.keys())[-1] + 1
# Initialise a list to store the matched samples
matched = []
# Iterate through all the profiles in the analysis
for allele_comprehension in profile_set:
# Ensure that the allele comprehension (profile) is not already in the profile file
if allele_comprehension not in [profiled_alleles for st, profiled_alleles in profile_data.items()]:
# Add the new profile to the list of new profiles
alleles = '\t'.join(
allele_num.split('_')[-1] for gene, allele_num in sorted(
allele_comprehension.items()
)
)
new_profiles.append(
f'{seq_type}\t{alleles.rstrip()}'
)
# Freeze the comprehension in order to be used as the key in the profile dictionary
frozen_allele_comprehension = json.dumps(allele_comprehension, sort_keys=True)
matches = profile_dict[hash(frozen_allele_comprehension)]
# Check to see if this sequence type hasn't already been found in the current analysis
if matches not in matched:
# Update the dictionary with the new sequence type: list of samples
profile_matches[seq_type] = matches
profile_data[seq_type] = allele_comprehension
# Add the matches to the list of matches
matched.append(matches)
# Increment the sequence type number of the next entry
seq_type += 1
return profile_matches, profile_data, new_profiles
def sequence_typer(profile_report, data, runmetadata, profile_matches, profile_data,
update=False, amino_acid=False):
"""
Perform the final sequence typing, and create the report
:param profile_report: String of absolute path to report file
:param data: String of header including all gene names. To be used in creating final report
:param runmetadata: Metadata object containing a list of all metadata objects
:param profile_matches: Dictionary of seq_type: matching profile
:param profile_data: Dictionary of seq_type: {gene name:allele ID}
:param update: Boolean of whether the report is to be created or updated. Default is False (created)
:param amino_acid: Boolean of whether the query sequences are amino acid. Default is False
"""
# Open the report
mode = 'w' if not update else 'a+'
if not update:
# Initialise the header with an extra 'Sample' column plus the tab-separated list
# of gene names
data = 'Sample\t' + data
else:
if not os.path.isfile(profile_report):
# Initialise the header with an extra 'Sample' column plus the tab-separated list
# of gene names
data = 'Sample\t' + data
else:
data = str()
with open(profile_report, mode, encoding='utf-8') as report:
for sample in runmetadata.samples:
# Iterate through all the matches to sequence profiles in the analysis
for seq_type, sample_names in profile_matches.items():
# Check if the sample name is in the list of samples names with the current
# sequence type
if sample.name in sample_names:
# Add the sample name, sequence type, and all the allele numbers to the
# report string
complement = '\t'.join(allele_num.split('_')[-1] for gene, allele_num in sorted(
profile_data[seq_type].items())
)
data += f'{sample.name}\t{seq_type}\t{complement.rstrip()}\n'
# Update the appropriate GenObject based on the current molecule
# (DNA or amino acid)
if not amino_acid:
sample.alleles.nt_st = seq_type
sample.alleles.nt_profile = profile_data[seq_type]
else:
sample.alleles.aa_st = seq_type
sample.alleles.aa_profile = profile_data[seq_type]
# Write the report
report.write(data)
return runmetadata
def append_profiles(new_profiles, profile_file, data, novel_profiles=False, profile_path=None, gene_names=None):
"""
Add new profiles to the profile file
:param new_profiles: List of all novel profiles in this analysis
:param profile_file: Name and absolute path to a profile file
:param data: String of header including all gene names. To be used in creating final report
:param novel_profiles: Boolean of whether the novel_profiles.txt file is to be populated. Default is False
:param profile_path: String of absolute path of folder in which profiles are located
:param gene_names: List of all genes in the analysis
"""
# Only try to add new profiles if there are new profiles in the analysis
if new_profiles:
# Initialise the string to store the new profile
new_data = str()
# If the profile file does not exist, add the header string of 'ST' plus the
# tab-separated gene names
if not os.path.isfile(profile_file):
new_data = data
# Iterate through all the new profiles, and add them to the new profile string
for profile in new_profiles:
new_data += f'{profile}\n'
# Open the report with a+ to either create, or append the profile string to it
with open(profile_file, 'a+', encoding='utf-8') as profile:
profile.write(new_data)
if novel_profiles:
novel_profile_file = os.path.join(profile_path, 'novel_profiles.txt')
if not os.path.isfile(novel_profile_file):
with open(novel_profile_file, 'w', encoding='utf-8') as novel:
novel.write('ST\t{names}\n'.format(names='\t'.join(gene_names)))
with open(novel_profile_file, 'a+', encoding='utf-8') as novel:
novel.write(new_data)
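# A minimal sketch (not part of this module) of the tab-separated profile
# format consumed by read_profile() and extended by append_profiles(). The
# gene names and allele numbers are illustrative only:
#
# ST    ECs2973 ECs2974
# 1     1       1
# 2     1       3
#
# read_profile() would load this as:
# {'1': {'ECs2973': '1', 'ECs2974': '1'}, '2': {'ECs2973': '1', 'ECs2974': '3'}}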
| AlleleFinder | /AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/allele_tools/allele_profiler.py | allele_profiler.py |
# Standard imports
from argparse import ArgumentParser
from glob import glob
import logging
import shutil
import sys
import os
# Third-party imports
from olctools.accessoryFunctions.accessoryFunctions import make_path, SetupLogging
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
# Local imports
from allele_tools.allele_profiler import read_profile
from allele_tools.profile_reduce import ProfileReduce
from allele_tools.methods import \
evaluate_translated_length, \
generic_evaluate_translated_length, \
remove_combined_db_files, \
pathfinder
class Translate:
"""
Translate and reduce alleles
"""
def main(self):
"""
Run the appropriate methods in the correct order
"""
# Read in all the allele files
self.load_alleles()
# Parse the alleles
self.parse_alleles(length_dict=self.length_dict)
# If a profile file has been provided, run the profiling methods
if self.profile_file:
# Extract seq_type:allele comprehensions from the nucleotide profile file
self.profile_data = read_profile(profile_file=self.profile_file)
# Create the amino acid profiles
self.aa_profile()
# Remove duplicate profiles
reduce = ProfileReduce(
profile=self.aa_profile_file,
names=self.gene_name_file
)
reduce.main()
# Read in the profile data again now that the profile file has been updated
self.aa_profile_data = read_profile(profile_file=self.aa_profile_file)
# Find linkages between nucleotide and amino acid profiles
self.profile_link()
# Write profile links to file
self.link_file()
# Copy and rename the reduced profile file
self.copy_profile()
def load_alleles(self):
"""
Use SeqIO to read in all the gene sequences
"""
for allele_file in self.sequence_files:
gene_name = os.path.splitext(os.path.basename(allele_file))[0]
# Add the gene name to the set of names
self.gene_names.add(gene_name)
self.allele_dict[gene_name] = SeqIO.to_dict(SeqIO.parse(allele_file, 'fasta'))
def parse_alleles(self, length_dict=None):
"""
Parse the allele files to translate the amino acid sequence using BioPython. Write the
amino acid sequence to file. Store the allele name in the notes. Find duplicates, and link
the nucleotide allele name to the amino acid allele name
:param length_dict: Dictionary of gene name: minimum acceptable length of translated sequence
"""
logging.info('Translating and parsing alleles')
for gene_name, allele_dict in self.allele_dict.items():
# Initialise the dictionary to store the links between the nt alleles and the aa
# alleles with the name of the gene
self.allele_links[gene_name] = {}
# Initialise a set of alleles that do not pass the required filters, and
# must be removed from the database
remove_list = set()
logging.info('Processing %s', gene_name)
# Initialise a dictionary to store the translated allele sequence: allele name
seq_allele = {}
# Set the name and path of the allele and notes files
allele_file = os.path.join(self.translated_path, f'{gene_name}.fasta')
notes_file = os.path.join(self.notes_path, f'{gene_name}_notes.txt')
# Open the file to store the translated alleles
with open(allele_file, 'w', encoding='utf-8') as aa_alleles:
# Open the notes file
with open(notes_file, 'w', encoding='utf-8') as notes:
# Create the header for the notes file
notes.write('nt_allele\taa_allele\tnote\n')
# Iterate through all the alleles in the dictionary
for allele, details in allele_dict.items():
# Create a list to store notes
note = []
# Create a boolean to track whether the sequence is filtered
filtered = False
# Create a string to store the untrimmed nucleotide sequence
original_nt_sequence = str(details.seq)
# Calculate the translated sequence
translated_allele = details.seq.translate()
# Remove all sequence after a stop codon (*)
split_allele = translated_allele.split('*')
# Create a string to hold the trimmed sequence
trimmed_seq = str()
# If there are multiple stop codons in the sequence, trim to the first one
if len(split_allele) > 2:
if split_allele[1]:
for trimmed in split_allele[1:]:
trimmed_seq += f'*{trimmed}'
else:
for trimmed in split_allele[2:]:
trimmed_seq += f'*{trimmed}'
note.append(f'Trimmed {trimmed_seq} from end')
elif len(split_allele) == 1:
pass
else:
if split_allele[-1] and not str(translated_allele).endswith('*'):
trimmed_seq += split_allele[-1]
note.append(f'Trimmed {trimmed_seq} from end')
# Create a dictionary to store the allele nt sequences to check to see if
# any are duplicates following trimming
nt_sequences = {}
filtered, note, nt_sequences, translated_allele = Translate.trim_alleles(
note=note,
allele=allele,
sequence=details,
gene_name=gene_name,
nt_allele_path=self.path,
trim_length=len(trimmed_seq),
length_dict=length_dict,
filtered=filtered,
nt_sequences=nt_sequences,
original_nt_sequence=original_nt_sequence
)
# If the allele has not been filtered out by trim_alleles
if not filtered:
# Determine if this amino acid allele is new
if str(translated_allele) not in seq_allele:
# Add the string of the amino acid sequence to the dictionary
seq_allele[str(translated_allele)] = allele
# Create a SeqRecord of the translated allele
seq_record = SeqRecord(
seq=translated_allele,
id=allele,
name=str(),
description=str()
)
# Write the SeqRecord to file
SeqIO.write(
sequences=seq_record,
handle=aa_alleles,
format='fasta'
)
# Update the notes with the allele naming information
notes.write(f'{allele}\t{allele}\t{";".join(note)}\n')
# Populate the linking dictionary with the nt allele: aa allele
self.allele_links[gene_name][allele.split('_')[-1]] = \
allele.split('_')[-1]
# Amino acid allele already exists
else:
# Extract the allele name corresponding to the translated sequence
aa_allele = seq_allele[str(translated_allele)]
# Update the notes, including that this allele is a duplicate, and a
# pointer to the original
notes.write(
f'{allele}\t{aa_allele}\tDuplicate'
)
if not note:
notes.write('\n')
else:
notes.write(f'; {";".join(note)}\n')
# Populate the linking dictionary with the nt allele: aa allele
self.allele_links[gene_name][allele.split('_')[-1]] = \
aa_allele.split('_')[-1]
self.allele_links[gene_name]['0'] = '0'
# Filtered alleles must be removed from the database
else:
Translate.write_filtered_allele_notes(
notes=notes,
allele=allele,
note=note,
)
remove_list.add(allele)
# Create a SeqRecord of the translated allele
seq_record = SeqRecord(
seq=translated_allele,
id=allele,
name=str(),
description=str()
)
# Write the filtered alleles to the filtered alleles file
Translate.create_or_update_filtered_files(
gene=gene_name,
allele_path=self.translated_path,
records_filter=[seq_record]
)
# Remove the filtered sequences from the database
Translate.remove_filtered_alleles_from_db(
gene_name=gene_name,
allele_list=remove_list,
nt_allele_path=self.path,
)
self.load_alleles()
@staticmethod
def trim_alleles(note, allele, sequence, gene_name, nt_allele_path, trim_length, length_dict, filtered,
nt_sequences, original_nt_sequence):
"""
Trim the alleles based on location of stop codons and whether the sequence is a multiple of three nucleotides.
Evaluate the trimmed sequence based on length and contents.
:param note: List of sequence-specific notes
:param allele: String of the allele identifier
:param sequence: SeqIO sequence object of nucleotide sequence
:param gene_name: String of the gene name to which the allele corresponds
:param nt_allele_path: String of the absolute path to the folder in which the nucleotide alleles are located
:param trim_length: Integer of the number of nucleotides to be trimmed (due to internal stop codons)
:param length_dict: Dictionary of minimum acceptable length for each gene in the analysis
:param filtered: Boolean to track whether the sequence fails the quality/length checks
:param nt_sequences: Dictionary of allele: sequence
:param original_nt_sequence: String of the untrimmed nucleotide sequence of the allele
:return: filtered: Updated boolean of whether the sequence fails quality/length checks
:return: note: Updated list of sequence-specific notes
:return: nt_sequences: Updated dictionary of allele: sequence
:return: translated_allele: SeqIO sequence object of trimmed, translated allele
"""
# Determine the length of sequence to trim from the end of the sequence
# Multiply the number of amino acid residues to trim by three to get the number of nucleotides
# Add the modulo three of the sequence to yield a final length divisible by three
nt_to_trim = 3 * trim_length + len(sequence) % 3
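# Worked example (illustrative values): trim_length=2 and a 100 bp record give
# nt_to_trim = 3 * 2 + 100 % 3 = 7, leaving 93 bp, which is divisible by three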
# Check if trimming is required
if nt_to_trim:
# Slice the string by the number of required bases at the end
sequence = sequence[:-nt_to_trim]
# Update the nucleotide sequence in the database with the trimmed version
Translate.update_allele_db(
nt_allele_path=nt_allele_path,
gene=gene_name,
allele=allele,
nt_sequence=sequence
)
# Translate the sequence to amino acid
translated_allele = sequence.seq.translate()
# Perform content and length checks of the protein sequence
if length_dict:
filtered, note = evaluate_translated_length(
aa_seq=str(translated_allele),
length_dict=length_dict,
gene=gene_name,
notes=note,
filtered=filtered
)
else:
filtered, note = generic_evaluate_translated_length(
aa_seq=str(translated_allele),
sequence=original_nt_sequence,
gene=gene_name,
notes=note,
filtered=filtered,
cutoff=0.95
)
# Check whether the trimmed nucleotide sequence duplicates a previously processed allele
filtered, note, nt_sequences = Translate.find_duplicates(
nt_sequences=nt_sequences,
nt_sequence=sequence,
allele=allele,
filtered=filtered,
note=note
)
# Update the notes if the sequence does not end with a stop codon
if not str(translated_allele).endswith('*'):
note.append('Trimmed sequence did not end with a stop codon')
return filtered, note, nt_sequences, translated_allele
@staticmethod
def update_allele_db(nt_allele_path, gene, allele, nt_sequence):
"""
Update nucleotide allele files with newly trimmed sequences
:param nt_allele_path: String of the absolute path to the folder containing the nucleotide allele database
:param gene: String of the name of the current gene
:param allele: String of the allele header (geneName_alleleID)
:param nt_sequence: SeqIO object of the nucleotide sequence
"""
# Set the name of the allele database file to update by joining the allele path and the gene name
nt_allele_file = os.path.join(nt_allele_path, f'{gene}.fasta')
# Create a list to store all database records (including modified ones)
updated_records = []
# Iterate over all the sequences in the nucleotide allele file
for record in SeqIO.parse(handle=nt_allele_file, format='fasta'):
# Check if the header of the database sequence matches the header of the query sequence
if record.id == allele:
# Update the record to be the query allele sequence object
record = nt_sequence
# Append the record to the list of records
updated_records.append(record)
# Overwrite the allele file with the records
SeqIO.write(updated_records, handle=nt_allele_file, format='fasta')
@staticmethod
def find_duplicates(nt_sequences, nt_sequence, allele, filtered, note):
"""
Check whether the trimmed nucleotide sequence matches a previously processed allele sequence
:param nt_sequences: Dictionary of allele: sequence
:param nt_sequence: SeqIO sequence object of the trimmed nucleotide sequence
:param allele: String of the allele header (geneName_alleleID)
:param filtered: Boolean of whether the sequence passes content/length filters
:param note: List of sequence-specific notes
:return: filtered: Updated boolean of sequence-specific content/length filtering results
:return: note: Updated list of sequence-specific notes
:return: nt_sequences: Updated dictionary of allele: sequence
"""
# Initialise a list to store any matches
matches = []
# Iterate over allele header, sequence in the dictionary
for prev_allele, nt_seq in nt_sequences.items():
# Check if the current nucleotide sequence matches a previous entry
if nt_sequence == nt_seq:
# Append matches to the list
matches.append(prev_allele)
# Check if the list has been populated with matches
if not matches:
# If no matches, update the dictionary with the novel sequence
nt_sequences[allele] = nt_sequence
return filtered, note, nt_sequences
# If there are matches
for match in matches:
# Set the filtering boolean to True (this is not a novel sequence, so do not update the database)
filtered = True
# Update the note
note.append(f'Trimmed nt sequence matches previous allele sequence: {match}')
return filtered, note, nt_sequences
@staticmethod
def write_filtered_allele_notes(notes, allele, note):
"""
Write notes for filtered sequences to file
:param notes: Filehandle for notes file
:param allele: String of the allele header (geneName_alleleID)
:param note: List of sequence-specific notes
"""
# Write the allele, then 'ND' (because the sequence is filtered, there is no amino acid allele), then the notes
notes.write(f'{allele}\tND\tFiltered: {"; ".join(note)}\n')
@staticmethod
def remove_filtered_alleles_from_db(gene_name, allele_list, nt_allele_path):
"""
Remove filtered sequences from the allele database file
:param gene_name: String of the gene name currently being analysed
:param allele_list: List of alleles to be removed from the database
:param nt_allele_path: String of the absolute path to the folder containing the nucleotide allele database
"""
# Remove the combinedtargets.fasta and BLAST database files from the folder
remove_combined_db_files(
allele_path=nt_allele_path
)
# Set the name and path of the allele file
nt_allele_file = os.path.join(nt_allele_path, f'{gene_name}.fasta')
# Create a list of all the records in the database file using SeqIO
records = SeqIO.parse(nt_allele_file, 'fasta')
# Initialise a list to store unfiltered records
records_keep = []
# Initialise a list to store filtered records
records_filter = []
# Iterate over all the records in the database
for record in records:
# Check if the allele header matches any of the headers of alleles to be filtered
if record.id not in allele_list:
# If the record is not to be filtered, add it to the keep list
records_keep.append(record)
# If the record header matches any of the alleles to be filtered, add it to the filtered list
else:
records_filter.append(record)
# Overwrite the nucleotide allele database file with all the unfiltered records
with open(nt_allele_file, 'w', encoding='utf-8') as allele_file:
for record in records_keep:
SeqIO.write(
sequences=record,
handle=allele_file,
format='fasta'
)
# Write the filtered records to the filtered records file
if records_filter:
Translate.create_or_update_filtered_files(
gene=gene_name,
allele_path=nt_allele_path,
records_filter=records_filter
)
@staticmethod
def create_or_update_filtered_files(gene, allele_path, records_filter):
"""
Write the filtered alleles to the filtered alleles file
:param gene: String of the gene name currently being analysed
:param allele_path: String of the absolute path to the folder containing the allele database
:param records_filter: List of SeqIO sequence objects for alleles to be added to the filtered alleles file
"""
# Set the name and path of the filtered alleles file
filtered_allele_file = os.path.join(allele_path, f'{gene}_filtered.txt')
# Append the filtered alleles to the file
with open(filtered_allele_file, 'a+', encoding='utf-8') as filtered_alleles:
for record in records_filter:
SeqIO.write(
sequences=record,
handle=filtered_alleles,
format='fasta'
)
def aa_profile(self):
"""
Create the amino acid profile
"""
# Create a list to store profiles containing alleles that have been quality filtered
filtered_list = set()
# Initialise a dictionary to store sequenceType:geneName_alleleID
filtered_dict = {}
# Initialise a string to hold the profile
profile_str = ''
# Iterate through all the sequence types in the profile_data dictionary
for seq_type in sorted(int(st) for st in self.profile_data):
# Create a string to store the allele information
allele_string = str()
# Iterate through all the gene: allele entries in the nested dictionary
for gene_name, allele in self.profile_data[str(seq_type)].items():
try:
# Extract the linked amino acid allele from the dictionary
allele_string += self.allele_links[gene_name][str(allele)] + '\t'
# If the gene name + allele ID is not in the allele link dictionary, add the sequence type to the set
except KeyError:
filtered_list.add(seq_type)
# Initialise the sequence type key in the filtered dictionary as required
if seq_type not in filtered_dict:
filtered_dict[seq_type] = []
# Update the dictionary with the filtered geneName_alleleIdentifier
filtered_dict[seq_type].append(f'{gene_name}_{allele}')
if allele_string:
# Add the sequence type to the profile string
profile_str += f'{str(seq_type)}\t{allele_string}'
# Remove trailing whitespace and add a newline for proper formatting
profile_str = profile_str.rstrip()
profile_str += '\n'
# Create the amino acid profile file
with open(self.aa_profile_file, 'w', encoding='utf-8') as aa_profile:
# Create a string of tab-delimited gene names to be used in the header
names = '\t'.join(sorted(list(self.gene_names)))
# Write the header string
aa_profile.write(f'ST\t{names.rstrip()}\n')
# Write the tab-delimited profile string
aa_profile.write(profile_str)
# Create the names file
with open(self.gene_name_file, 'w', encoding='utf-8') as gene_file:
# Write the names to file
gene_file.write('\n'.join(sorted(list(self.gene_names))))
if filtered_list:
filtered_list = sorted(list(filtered_list))
# Remove the filtered profiles from the profile files
Translate.filter_profiles(
filtered_list=filtered_list,
profile_file=self.profile_file,
)
# Write the notes to file
Translate.filter_notes(
filtered_list=filtered_list,
filtered_dict=filtered_dict,
profile_path=os.path.dirname(self.profile_file)
)
self.profile_data = read_profile(profile_file=self.profile_file)
@staticmethod
def filter_profiles(filtered_list, profile_file):
"""
Remove filtered profiles from the profile file. Write them to the filtered profile file
:param filtered_list: Set of filtered sequence types
:param profile_file: String of the absolute path to the profile file
"""
# Initialise a list to store the filtered sequence types
filtered_rows = []
# Initialise a list to store the unfiltered sequence types
keep_rows = []
# Read in the profile files
with open(profile_file, 'r', encoding='utf-8') as profiles:
# Iterate over all the profiles in the profile files
for row in profiles:
# Extract the sequence type from the row
seq_type = row.split('\t')[0]
# Check if it is the header, which starts with 'ST'
if seq_type == 'ST':
# Add the header row to the filtered profile output (it also falls through to the kept rows below)
filtered_rows.append(row)
# Check to see if the sequence type of the row is present in the list of filtered sequence types
if str(seq_type) in [str(filtered) for filtered in filtered_list]:
# Add the row to the list of filtered rows
filtered_rows.append(row)
# Unfiltered rows are added to the list of unfiltered rows
else:
keep_rows.append(row)
# Extract the absolute path of the folder in which the profile file is located
profile_path = os.path.dirname(profile_file)
# Overwrite the profile file with the unfiltered profiles
with open(os.path.join(profile_path, 'profile.txt'), 'w', encoding='utf-8') as updated_profile:
updated_profile.write(''.join(keep_rows))
# Overwrite the filtered profile file with the filtered profiles
with open(os.path.join(profile_path, 'filtered_profiles.txt'), 'w', encoding='utf-8') as filtered_profiles:
filtered_profiles.write(''.join(filtered_rows))
@staticmethod
def filter_notes(filtered_list, filtered_dict, profile_path):
"""
Write the notes regarding profile filtering to file
:param filtered_list: Set of filtered sequence types
:param filtered_dict: Dictionary of sequenceType:geneName_alleleID
:param profile_path: String of the absolute path to the folder containing the profile file
:return:
"""
# Set the absolute path of the file containing the profile filtering notes
filtered_notes = os.path.join(profile_path, 'filtering_notes.txt')
# Write the notes to file
with open(filtered_notes, 'w', encoding='utf-8') as notes:
# Create the header
notes.write('SequenceType\tFilteringNote\n')
# Create a new line for each filtered profile
for seq_type in filtered_list:
# Create a string of the list of filtered alleles present in this profile
gene_allele = ';'.join(filtered_dict[seq_type])
# Write the sequence type and all the missing alleles to the note
notes.write(f'{seq_type}\t{gene_allele} missing\n')
def profile_link(self):
"""
Link nucleotide and amino acid profiles
"""
# Initialise a dictionary to store the number of matches between a query profile and the profiles in the database
match_score = {}
# Iterate over all the profiles in the profile file
for seq_type, gene_dict in self.profile_data.items():
# Initialise the seq_type key in the dictionary
self.profile_matches[seq_type] = set()
match_score[seq_type] = {}
# Iterate over all the gene name, allele ID combinations in the profile dictionary
for gene, allele in gene_dict.items():
# Iterate over all the profiles in the amino acid profile file
for aa_st, aa_gene_dict in self.aa_profile_data.items():
# Use the gene name to extract the amino acid allele ID from the profile file.
# Also extract the amino acid allele ID from the linking dictionary with the gene name and
# nucleotide allele ID. Check if they match
if aa_gene_dict[gene] == self.allele_links[gene][allele]:
# Initialise the amino acid sequence type in the dictionary
if aa_st not in match_score[seq_type]:
match_score[seq_type][aa_st] = 0
# Increment the number of matches to the profile
match_score[seq_type][aa_st] += 1
# Iterate over all the matches to the profiles in the profile file
for seq_type, aa_st_dict in match_score.items():
# Iterate over the amino acid sequence type matches
for aa_st, matches in aa_st_dict.items():
# Check if the number of matches observed is equal to the required number of matches (one for each gene)
if matches == len(self.gene_names):
# Update the dictionary of matches with the amino acid sequence type: nucleotide sequence type
self.profile_matches[aa_st].add(seq_type)
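# Scoring sketch (toy values, not from the package): with two genes in the analysis, an nt
# sequence type 5 whose alleles link to aa alleles geneA=1 and geneB=3 accumulates
# match_score['5']['4'] == 2 against an aa sequence type 4 containing geneA=1 and geneB=3;
# since 2 == len(self.gene_names), nt ST 5 is added to self.profile_matches['4']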
def link_file(self):
"""
Write linking details between nucleotide and amino acid profiles to file
"""
# Open the file linking amino acid and nucleotide sequence types
with open(self.aa_nt_profile_link_file, 'w', encoding='utf-8') as link:
# Write the header information
link.write('aa_seq_type\tnt_seq_types\n')
# Iterate over all the profile matches in the dictionary
for seq_type, match_set in self.profile_matches.items():
# Check the length of the match
if len(match_set) == 1:
# With a single match, convert the set to a string
nt_st = ''.join(match_set)
link.write(f'{seq_type}\t{nt_st}\n')
# Multiple profile matches
else:
# Semicolon-join the set of matches. Since they are strings, first typecast to int for proper
# sorting before converting back to strings
nt_st = ';'.join(str(nt_st) for nt_st in sorted(int(nt_st) for nt_st in match_set))
link.write(f'{seq_type}\t{nt_st}\n')
def copy_profile(self):
"""
Copy the reduced profile file from the profile folder to the base aa_profile folder
"""
# Use shutil to copy and rename the profile file to the root of the report path
shutil.copyfile(
src=os.path.join(self.report_path, 'profile', 'profile.txt'),
dst=os.path.join(self.report_path, 'profile.txt')
)
def __init__(self, path, profile, report_path=os.path.join(os.getcwd(), 'aa_profile'),
translated_path=os.path.join(os.getcwd(), 'aa_alleles'), length_dict=None):
logging.info('Welcome to the allele translator!')
self.path = pathfinder(path=path)
if profile:
if not os.path.isfile(profile):
self.profile_file = os.path.join(self.path, 'nt_profile', 'profile.txt')
else:
self.profile_file = profile
try:
assert os.path.isfile(self.profile_file)
except AssertionError as exc:
logging.error(
'Cannot locate the required profile file: %s. Please ensure that '
'the file name and path of your file is correct', self.profile_file
)
raise SystemExit from exc
else:
self.profile_file = None
self.sequence_files = glob(os.path.join(self.path, '*.fasta'))
try:
assert self.sequence_files
except AssertionError as exc:
logging.error('Could not locate alleles in provided allele path: %s', self.path)
raise SystemExit from exc
self.report_path = pathfinder(path=report_path)
make_path(inpath=self.report_path)
self.translated_path = pathfinder(path=translated_path)
make_path(inpath=self.translated_path)
self.notes_path = os.path.join(self.translated_path, 'notes')
make_path(inpath=self.notes_path)
self.length_dict = length_dict
self.allele_dict = {}
self.profile_data = {}
self.allele_links = {}
self.aa_profile_file = os.path.join(self.report_path, 'aa_full_profile.txt')
self.gene_names = set()
self.gene_name_file = os.path.join(self.report_path, 'gene_names.txt')
self.aa_profile_data = {}
self.profile_matches = {}
self.aa_nt_profile_link_file = os.path.join(
self.report_path,
'aa_nt_profile_links.tsv'
)
def cli():
"""
Collect the arguments, create an object, and run the script
"""
# Parser for arguments
parser = ArgumentParser(
description='Translate allele files in nucleotide format to amino acid. '
'Remove duplicates. Keep notes.'
)
parser.add_argument(
'-p', '--path',
required=True,
help='Specify path containing allele files.'
)
parser.add_argument(
'--profile',
action='store_true',
help='Optionally parse the nucleic acid profile, and create the corresponding reduced '
'amino acid profile. The profile must be named profile.txt and be located in the nt_profile folder within the supplied path'
)
parser.add_argument(
'--report_path',
default=os.path.join(os.getcwd(), 'aa_profile'),
help='Optionally provide the name (and path, if desired) of the folder into which the amino acid profile '
'and related files are to be written. Default folder is "aa_profile" in your current working directory'
)
parser.add_argument(
'--translated_path',
default=os.path.join(os.getcwd(), 'aa_alleles'),
help='Optionally provide the name (and path, if desired) of the folder into which the amino acid alleles '
'and notes are to be written. Default folder is "aa_alleles" in your current working directory'
)
# Get the arguments into an object
arguments = parser.parse_args()
SetupLogging(debug=True)
translate = Translate(
path=arguments.path,
profile=arguments.profile,
report_path=arguments.report_path,
translated_path=arguments.translated_path
)
translate.main()
logging.info('Allele translation complete!')
# Prevent the arguments being printed to the console (they are returned in order for the tests to work)
sys.stderr = open(os.devnull, 'w', encoding='utf-8')
return arguments
if __name__ == '__main__':
cli()
|
AlleleFinder
|
/AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/allele_tools/allele_translate_reduce.py
|
allele_translate_reduce.py
|
# Standard imports
from argparse import ArgumentParser
from csv import DictReader
import logging
import sys
import os
# Third-party imports
from olctools.accessoryFunctions.accessoryFunctions import \
make_path, \
SetupLogging
from allele_tools.methods import pathfinder
class ProfileReduce:
"""
Reduce Enterobase wgMLST profiles
"""
def main(self):
"""
Run the required methods in the appropriate order
"""
self.read_names()
self.read_profile()
def read_names(self):
"""
Read in all the names of the genes of interest
"""
logging.info('Reading names file')
with open(self.name_file, 'r', encoding='utf-8') as names:
self.names = sorted([name.rstrip() for name in names.readlines()])
logging.debug('Genes used to reduce profile are: %s', '\t'.join(self.names))
def read_profile(self):
"""
Load, parse, and reduce the profile information
"""
logging.info('Reducing profiles')
with open(self.profile, 'r', encoding='utf-8') as profile:
with open(self.reduced_profile, 'w', encoding='utf-8') as reduced:
with open(self.notes_file, 'w', encoding='utf-8') as notes:
# Write the header for the notes file
notes.write('OriginalSequenceType\tReducedSequenceType\tNotes\n')
# Create the header for the reduced profile
gene_names = '\t'.join(self.names)
reduced_data = f'ST\t{gene_names}\n'
logging.info('Loading profile into memory')
# Use DictReader to load the profile file
profile_dict = DictReader(profile, dialect='excel-tab')
# Initialise a dictionary to store the string of allele numbers: sequence type
profile_st = {}
logging.info('Parsing profile')
# Iterate through all the sequence types in the profile
for allele_dict in profile_dict:
# Extract the sequence type from the 'ST' column
seq_type = allele_dict['ST']
# Initialise variables to store the allele numbering information
allele_list = []
allele_str = ''
# Iterate through all the genes of interest
for gene in self.names:
try:
allele_str += allele_dict[gene]
# If an allele value is missing (e.g. it was filtered based on length), skip this gene
except TypeError:
continue
# Add the allele number to the list, and to the string
allele_list.append(allele_dict[gene])
# Check if the string of allele numbers is already in the dictionary
if allele_str not in profile_st:
# Update the dictionary with the string of alleles: sequence type
profile_st[allele_str] = seq_type
alleles = '\t'.join(allele_list)
# Add the sequence type allele numbers for this sequence to the string
reduced_data += f'{seq_type}\t{alleles.rstrip()}\n'
# Update the notes with the sequence type linking information
notes.write(f'{seq_type}\t{seq_type}\n')
# Reduced profile already exists
else:
# Extract the original sequence type with this string of allele numbers
original_seq_type = profile_st[allele_str]
# Write the sequence type linking information, making note of the fact
# that this is a duplicate
notes.write(f'{seq_type}\t{original_seq_type}\tduplicate\n')
# Write the reduced profile information to file
reduced.write(reduced_data)
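# Reduction sketch (toy profile, assuming self.names == ['geneA', 'geneB']): input rows
# ST 1 (geneA=2, geneB=3, geneC=9) and ST 2 (geneA=2, geneB=3, geneC=11) collapse to the
# same allele string '23', so ST 1 is written to the reduced profile and ST 2 is recorded
# in reducing_notes.txt as a duplicate of ST 1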
def __init__(self, profile, names, output='profile'):
logging.info('Welcome to profile reducer!')
self.profile = pathfinder(path=profile)
try:
assert os.path.isfile(self.profile)
except AssertionError as exc:
logging.error('Cannot locate the specified profile file: %s', self.profile)
raise SystemExit from exc
# Create the folder into which the reduced profile and notes are to be placed
self.report_path = os.path.join(os.path.dirname(self.profile), output)
make_path(self.report_path)
self.reduced_profile = os.path.join(self.report_path, 'profile.txt')
self.notes_file = os.path.join(self.report_path, 'reducing_notes.txt')
self.name_file = pathfinder(path=names)
try:
assert os.path.isfile(self.name_file)
except AssertionError as exc:
logging.error('Cannot find the supplied file with gene names: %s', self.name_file)
raise SystemExit from exc
self.names = []
self.profile_dict = {}
self.allele_dict = {}
def cli():
"""
Collect the arguments, create an object, and run the script
"""
# Parser for arguments
parser = ArgumentParser(description='Extract the genes of interest from a profile file')
parser.add_argument(
'-p', '--profile',
metavar='profile',
required=True,
help='Name and path of profile file.')
parser.add_argument(
'-n', '--names',
metavar='names',
required=True,
help='Name and path to a file containing the gene names (one per line) to be extracted '
'from the profile')
# Get the arguments into an object
arguments = parser.parse_args()
SetupLogging(debug=True)
reduce = ProfileReduce(
profile=arguments.profile,
names=arguments.names
)
reduce.main()
logging.info('Profile reduction complete!')
# Prevent the arguments being printed to the console (they are returned in order for the tests to work)
sys.stderr = open(os.devnull, 'w', encoding='utf-8')
return arguments
if __name__ == '__main__':
cli()
|
AlleleFinder
|
/AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/allele_tools/profile_reduce.py
|
profile_reduce.py
|
# Standard imports
from argparse import ArgumentParser
from csv import DictReader
from glob import glob
import json
import logging
import math
import os
# Third party inputs
from olctools.accessoryFunctions.accessoryFunctions import (
GenObject,
make_path,
MetadataObject,
relative_symlink
)
from Bio.Blast.Applications import NcbiblastnCommandline, NcbiblastpCommandline
from Bio.Data.CodonTable import TranslationError
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio import SeqIO
import coloredlogs
def setup_logging(arguments):
"""
Set the custom colour scheme and message format to be used by coloredlogs
:param arguments: type parsed ArgumentParser object
"""
# Set up a dictionary of the default colour scheme, and font styles
coloredlogs.DEFAULT_LEVEL_STYLES = {
'debug': {
'bold': True, 'color': 'green'},
'info': {
'bold': True, 'color': 'blue'},
'warning': {
'bold': True, 'color': 'yellow'},
'error': {
'bold': True, 'color': 'red'},
'critical': {
'bold': True, 'background': 'red'}
}
# Change the default log format to be the time prepended to the appropriately formatted
# message string
coloredlogs.DEFAULT_LOG_FORMAT = '%(asctime)s %(message)s'
# Set the logging level
coloredlogs.install(level=arguments.verbosity.upper())
def setup_arguments(parser: ArgumentParser):
"""
Finalise setting up the ArgumentParser arguments into an object, and running subparser
functions, or displaying the help message
:param parser: type: ArgumentParser object
:return: parsed ArgumentParser object
"""
# Get the arguments into an object
arguments = parser.parse_args()
# Run the appropriate function for each sub-parser.
if hasattr(arguments, 'func'):
# Set up logging
setup_logging(arguments=arguments)
arguments.func(arguments)
# If the 'func' attribute doesn't exist, display the basic help for the appropriate subparser
# (if any)
else:
try:
# Determine which subparser was called by extracting it from the arguments.
# Note that this requires the use of the desc keyword when creating subparsers
command = list(vars(arguments).keys())[0]
# If the extracted command exists, use the command-specific subparser help
if command:
parser.parse_args([command, '-h'])
# Otherwise, use the basic help
else:
parser.parse_args(['-h'])
# If there were no subparsers specified (the list of keys in the arguments is empty),
# use the basic help
except IndexError:
parser.parse_args(['-h'])
return arguments
def common_allele_find_errors(
args: ArgumentParser.parse_args,
errors: list,
amino_acid: bool):
"""
Perform checks for arguments shared between allele finding scripts
:param args: type ArgumentParser arguments
:param errors: List of errors with supplied arguments
:param amino_acid: Boolean of whether the query sequence is amino acid or nucleotide
:return: Updated list of errors
"""
# Query folder checks
if not os.path.isdir(args.query_path):
errors.append(f'Could not find supplied query folder: {args.query_path}')
else:
if not glob(os.path.join(args.query_path, '*.fasta')):
errors.append(
f'Could not locate sequence files in supplied query folder: {args.query_path}'
)
else:
if amino_acid:
errors = detect_protein(
query_path=args.query_path,
errors=errors
)
# Nucleotide and amino acid checks
errors = profile_allele_check(
args=args,
errors=errors
)
return errors
def profile_allele_check(
args: ArgumentParser.parse_args,
errors: list):
"""
Perform checks for arguments pertaining to profile and allele files
:param args: type ArgumentParser arguments
:param errors: List of errors with supplied arguments
:return: Updated list of errors
"""
# Nucleotide checks
try:
if not os.path.isdir(args.nt_alleles):
errors.append(f'Could not find supplied nucleic acid allele folder: {args.nt_alleles}')
else:
if not glob(os.path.join(args.nt_alleles, '*.fasta')):
errors.append(
f'Could not locate sequence files in supplied nucleic acid allele folder: {args.nt_alleles}'
)
# Allows for checks in analyses without nucleotide sequences
except AttributeError:
pass
try:
if not os.path.isfile(args.nt_profile):
errors.append(f'Could not locate supplied nucleic acid profile file: {args.nt_profile}')
except AttributeError:
pass
# Amino acid checks
try:
if not os.path.isdir(args.aa_alleles):
errors.append(f'Could not find supplied amino acid allele folder: {args.aa_alleles}')
else:
if not glob(os.path.join(args.aa_alleles, '*.fasta')):
errors.append(
f'Could not locate sequence files in supplied amino acid allele folder: {args.aa_alleles}'
)
except AttributeError:
pass
try:
if not os.path.isfile(args.aa_profile):
errors.append(f'Could not locate supplied amino acid profile file: {args.aa_profile}')
except AttributeError:
pass
return errors
def error_print(
errors: list):
"""
Log grammatically correct error messages and exit
:param errors: List of errors with supplied arguments
"""
# Create variables to allow for grammatically correct error messages
error_string = '\n'.join(errors)
was_were = 'was' if len(errors) == 1 else 'were'
correct = 'error' if len(errors) == 1 else 'errors'
logging.error(
'There %s %s %s when attempting to run your command: \n%s', was_were, len(errors), correct, error_string)
raise SystemExit
def detect_protein(
query_path: str,
errors: list):
"""
Attempt to determine whether a supplied file contains protein sequence
:param query_path: String of absolute path to folder containing sequence files
:param errors: List of errors with supplied arguments
:return: Updated list of errors
"""
# Create a list of all the FASTA files in the query path
seq_files = glob(os.path.join(query_path, '*.fasta'))
# Iterate through all the files
for seq_file in seq_files:
# Initialise a boolean to track whether the sequences appear to be amino acid or nucleotide
aa = False
for record in SeqIO.parse(seq_file, format='fasta'):
# Convert the sequence object to a string
seq = str(record.seq)
# Create a set of all the characters in the string of the sequence
seq_set = set(seq)
# Since there are only 4 (5 with N, 6 with N and -) possible characters in DNA, but 20 in protein, I chose
# a length of 10 to allow for relatively low complexity protein sequences to pass, but DNA to fail
if len(seq_set) > 10:
# Update the boolean - note that only a single sequence in the file needs to be considered protein for
# the entire file to pass
aa = True
# Update the errors if the file appears to be DNA
if not aa:
errors.append(f'Query file {seq_file} does not appear to be protein')
return errors
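# Heuristic illustration (sequences invented): set('ATGCATGCAT') has 4 distinct characters,
# so the record looks like DNA, whereas set('MKTAYIAKQRQISFVKSHFSRQ') has 12 distinct
# characters (> 10), so the file would be accepted as protein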
def pathfinder(path: str):
"""
Create an absolute path from the user-supplied path. Allows for tilde expansion
:param path: String of path supplied by user. Could be relative, tilde expansion, or absolute
:return: out_path: String of absolute path provided by user.
"""
# Determine if the path requires path expansion
if path.startswith('~'):
# Create the absolute path of the tilde expanded path
out_path = os.path.abspath(os.path.expanduser(os.path.join(path)))
else:
# Create the absolute path from the path
out_path = os.path.abspath(os.path.join(path))
return out_path
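# Usage sketch (paths are hypothetical): pathfinder('~/alleles') expands to
# '/home/<user>/alleles', pathfinder('alleles') resolves against the current working
# directory, and pathfinder('/data/alleles') is returned unchanged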
def query_prep(
query_path: str,
runmetadata: MetadataObject,
clear_report=True):
"""
Create MetadataObjects for each sample
:param query_path: String of absolute path to folder containing sequence files
:param runmetadata: MetadataObject with list of GenObjects for each query
:param clear_report: Boolean of whether to clear previous iterations of BLAST reports. Default is True
:return runmetadata: MetadataObject updated with query information
"""
logging.info('Preparing query files')
# Find all the sequence files in the path
fasta_files = sorted(glob(os.path.join(query_path, '*.fasta')))
for fasta in fasta_files:
name = os.path.splitext(os.path.basename(fasta))[0]
if name != 'combinedtargets':
# Create a MetadataObject for each sample
metadata = MetadataObject()
metadata.samples = []
# Populate the MetadataObject with the required attributes
metadata.name = name
metadata.general = GenObject()
metadata.commands = GenObject()
metadata.alleles = GenObject()
metadata.alleles.outputdirectory = os.path.join(query_path, metadata.name)
# Set the name of the BLAST output file
metadata.alleles.blast_report = os.path.join(
metadata.alleles.outputdirectory,
f'{metadata.name}.tsv'
)
# As the name and number of alleles can change over multiple iterations of the
# program, it's best to clear out any old reports before running again
if clear_report:
try:
os.remove(metadata.alleles.blast_report)
except FileNotFoundError:
pass
make_path(metadata.alleles.outputdirectory)
# Create a symlink of the sequence file in its own subdirectory
metadata.general.bestassemblyfile = relative_symlink(
src_file=fasta,
output_dir=metadata.alleles.outputdirectory,
export_output=True
)
metadata.samples.append(metadata)
runmetadata.samples.append(metadata)
return runmetadata
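# Layout sketch (paths invented): for /queries/sample1.fasta this prepares
#   /queries/sample1/                 output directory
#   /queries/sample1/sample1.tsv      BLAST report (deleted first when clear_report=True)
# plus a relative symlink to the query file inside that directory (the exact link name is
# determined by relative_symlink)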
def blast_alleles(
runmetadata: MetadataObject,
amino_acid: bool,
combined_targets: str,
cpus: int,
outfmt: str):
"""
Run the BLAST analyses on the query
:param runmetadata: MetadataObject with list of GenObjects for each query
:param amino_acid: Boolean of whether the query sequence is amino acid or nucleotide
:param combined_targets: String of absolute path to file containing all sequences in other files in folder
:param cpus: Integer of number of threads to use for BLAST analyses
:param outfmt: String of BLAST fields to include in the report
"""
logging.info('Running BLAST analyses')
# Iterate through the samples
for sample in runmetadata.samples:
# Run the appropriate BLAST command: BLASTN for nucleotide, BLASTP for protein
if not amino_acid:
blast = NcbiblastnCommandline(
db=os.path.splitext(combined_targets)[0],
query=sample.general.bestassemblyfile,
num_alignments=100000000,
evalue=0.001,
num_threads=cpus,
task='blastn',
outfmt=outfmt,
out=sample.alleles.blast_report
)
else:
blast = NcbiblastpCommandline(
query=sample.general.bestassemblyfile,
db=os.path.splitext(combined_targets)[0],
evalue=0.001,
num_alignments=100000000,
num_threads=cpus,
outfmt=outfmt,
out=sample.alleles.blast_report
)
blast()
def create_gene_names(
path=os.getcwd(),
name='genes.txt'):
"""
Create a file with gene names to use in reducing a wgMLST profile by finding any .fasta files
in a folder and adding them to the file (one per line)
:param path: type: String of the path in which the genes file is to be created
:param name: type: String of the name of the gene file
"""
# Find all the .fasta files in the path
fasta_files = glob(os.path.join(path, '*.fasta'))
# Set the name and path of the gene file
gene_file = os.path.join(path, name)
# Open the gene file to write
with open(gene_file, 'w', encoding='utf-8') as genes_file:
for fasta in fasta_files:
# Remove the path information and the file extension. Print to file
genes_file.write(os.path.basename(os.path.splitext(fasta)[0]) + '\n')
logging.debug(
'Adding %s to %s',
os.path.basename(os.path.splitext(fasta)[0]), gene_file
)
# Check to see if the file is empty
if os.stat(gene_file).st_size == 0:
# Log an error stating that the file could not be properly populated
logging.error(
'Created gene file, %s, is empty. Please ensure that directory %s has files with '
'.fasta extensions', gene_file, path
)
raise SystemExit
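# Example (file names invented): a folder containing ECs1205.fasta and ECs1206.fasta
# produces a genes.txt with the lines 'ECs1205' and 'ECs1206'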
def create_blast_dict(
sample: MetadataObject,
extended_fieldnames: list):
"""
Use DictReader to open and read a BLAST report into a dictionary
:param sample: MetadataObject with list of GenObjects for each query
:param extended_fieldnames: List of the BLAST fields used, as well as the additional percent
match in index 14
"""
# Open the BLAST report as a dictionary
blastdict = DictReader(
open(sample.alleles.blast_report,
encoding='utf-8'),
fieldnames=extended_fieldnames,
dialect='excel-tab'
)
return blastdict
def parse_colocated_results(
runmetadata: MetadataObject,
fieldnames: list,
extended_fieldnames: list,
amino_acid: bool,
gene_names: list,
nt_allele_path: str,
aa_allele_path: str,
report_path: str,
overlap_range=50,
cutoff=90):
"""
Parse BLAST outputs. Ensure co-location of genes that must be co-located
:param runmetadata: MetadataObject with list of GenObjects for each query
:param fieldnames: List of the BLAST fields used
:param extended_fieldnames: List of the BLAST fields used, as well as the additional percent
match in index 14
:param amino_acid: Boolean of whether targets are protein
:param gene_names: List of all gene names in the analysis
:param nt_allele_path: String of the absolute path to the folder containing nucleotide allele files
:param aa_allele_path: String of the absolute path to the folder containing amino acid allele files
:param report_path: String of the absolute path to the folder into which reports are to be written
:param overlap_range: Integer of the maximum distance allowed between two genes in order for
them to be considered co-located. Default is 50 bp
:param cutoff: Integer of the minimum percent identity between query and subject sequence.
Default is 90%
:return: runmetadata: Updated MetadataObjects
:return: notes: Dictionary of contig: query_range: list of notes
"""
logging.info('Parsing BLAST outputs')
notes = {}
for sample in runmetadata.samples:
# Initialise GenObjects as required
sample.alleles.blastlist = []
sample.alleles.targetsequence = {}
# Read the BLAST outputs into a dictionary
blastdict = create_blast_dict(
sample=sample,
extended_fieldnames=extended_fieldnames
)
# Initialise dictionaries to store parsed outputs
resultdict = {}
colocation_dict = {}
processed_range_dict = {}
# Go through each BLAST result
for row in blastdict:
# Ignore the headers
if row['query_id'].startswith(fieldnames[0]):
continue
target_id = row['subject_id']
target_start = row['subject_start']
target_end = row['subject_end']
target_seq = row['query_sequence']
high = max([int(row['query_start']), int(row['query_end'])])
low = min([int(row['query_start']), int(row['query_end'])])
# Create a list of the properly ordered start and stop points of the match
query_range = [low, high]
# Remove unwanted pipes added to the name
nt_allele = target_id.lstrip('gb|').rstrip('|') if '|' in target_id else \
target_id
# If the percent identity is greater than or equal to the cutoff
if float(row['percent_match']) >= cutoff:
# Append the hit dictionary to the list
sample.alleles.blastlist.append(row)
# Determine if the orientation of the sequence is reversed compared to
# the reference sequence
if int(target_end) < int(target_start) and not amino_acid:
seq = Seq(target_seq)
# Calculate the reverse complement of the sequence
nt_querysequence = str(seq.reverse_complement())
# If the sequence is not reversed, use the sequence as it is in the output
else:
nt_querysequence = target_seq
# Create a variable to avoid unnecessary typing
contig = row['query_id']
# Create a boolean to track whether this contig:range combination has already been processed
processed = False
# Add the contig key to the dictionary as required
if contig not in processed_range_dict:
processed_range_dict[contig] = set()
# Check the processed range dictionary to see if the current range is present
if processed_range_dict[contig]:
for previous_range in processed_range_dict[contig]:
# Allow a small overlap of five bases in case the range of one query is slightly different
overlap = query_range[1] + 5 >= previous_range[0] and \
previous_range[1] + 5 >= query_range[0]
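# Worked example (illustrative values): query_range = [100, 400] and previous_range = (395, 700)
# give 400 + 5 >= 395 and 700 + 5 >= 100, both True, so the hit is treated as already processed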
# If the range is already present in the dictionary, update the tracking boolean
if overlap:
processed = True
# Add the range to the set if it is empty
else:
processed_range_dict[contig].add(tuple(query_range))
# If range has already been processed, we can skip this iteration of it
if processed:
continue
# Update the processed ranges dictionary with the current range
processed_range_dict[contig].add(tuple(query_range))
# Create a tuple of the query range list to allow it to be used as a dictionary key
query_range_tuple = tuple(query_range)
# Add keys to the targetsequence dictionary as required
if contig not in sample.alleles.targetsequence:
sample.alleles.targetsequence[contig] = {}
if query_range_tuple not in sample.alleles.targetsequence[contig]:
sample.alleles.targetsequence[contig][query_range_tuple] = {}
# Populate the percent match dictionary
if contig not in resultdict:
resultdict[contig] = {}
if query_range_tuple not in resultdict[contig]:
resultdict[contig][query_range_tuple] = {nt_allele: row['percent_match']}
# Populate the notes dictionary
if contig not in notes:
notes[contig] = {}
if query_range_tuple not in notes[contig]:
notes[contig][query_range_tuple] = []
# Determine the name of the gene corresponding to the allele e.g. if the allele is
# ECs1206_138, the corresponding gene is ECs1206
base_gene = [gene_name for gene_name in gene_names if gene_name in nt_allele][0]
# Translate the query sequence to protein
aa_querysequence = translate_sequence(
nt_seq=nt_querysequence
)
# Find the amino acid allele corresponding to this sequence
returned = aa_allele_lookup(
aa_seq=aa_querysequence,
gene=base_gene,
aa_allele_path=aa_allele_path,
notes=notes[contig][query_range_tuple]
)
# Initialise a string to hold the amino acid allele identifier
aa_allele = str()
# Create a boolean to track whether the amino acid sequence fails the screen criteria
filtered = False
# If a perfect match to a previous allele was found, a string is returned
if isinstance(returned, str):
aa_allele = returned
# If no perfect match was found to a previous allele, a tuple of the updated filtered boolean, and
# notes on the sequence are returned
else:
filtered, notes[contig][query_range_tuple] = returned
# Add unfiltered imperfect nt alleles to the database
if float(row['percent_match']) < 100 and not filtered:
# Find the next allele identifier for the database
nt_allele_id = find_next_allele(
gene=base_gene,
allele_path=nt_allele_path
)
# Add the base gene name to the allele identifier
nt_allele = f'{base_gene}_{nt_allele_id}'
# Update the allele database with the new allele
notes[contig][query_range_tuple], nt_allele = update_allele_databases(
query_sequence=nt_querysequence,
header=nt_allele,
filtered=filtered,
gene=base_gene,
report_path=report_path,
allele_path=nt_allele_path,
notes=notes[contig][query_range_tuple],
molecule='Nucleotide'
)
# Add unfiltered novel aa alleles to the database
if not aa_allele and not filtered:
aa_allele_id = find_next_allele(
gene=base_gene,
allele_path=aa_allele_path
)
aa_allele = f'{base_gene}_{aa_allele_id}'
notes[contig][query_range_tuple], aa_allele = update_allele_databases(
query_sequence=aa_querysequence,
header=aa_allele,
filtered=filtered,
gene=base_gene,
report_path=report_path,
allele_path=aa_allele_path,
notes=notes[contig][query_range_tuple],
molecule='Amino acid'
)
# Populate the targetsequence dictionary with information on the nt and aa alleles
if base_gene not in sample.alleles.targetsequence[contig][query_range_tuple]:
sample.alleles.targetsequence[contig][query_range_tuple][base_gene] = {
'nt': {
'allele': nt_allele,
'sequence': nt_querysequence
},
'aa': {
'allele': aa_allele,
'sequence': aa_querysequence
}
}
# Populate the co-location dictionary with the required keys as necessary
if contig not in colocation_dict:
colocation_dict[contig] = {}
# The query_ranges and target keys both correspond to lists of values
if 'query_ranges' not in colocation_dict[contig]:
colocation_dict[contig] = {
'query_ranges': [query_range],
'target': [nt_allele]
}
# If the keys already exist, append to the lists
else:
colocation_dict[contig]['query_ranges'].append(query_range)
colocation_dict[contig]['target'].append(nt_allele)
# Store the BLAST outputs in the MetadataObject
sample.alleles.blastresults = resultdict
# Populate missing results with 'NA' values
if len(resultdict) == 0:
sample.alleles.blastresults = 'NA'
sample.alleles.overlap_dict = colocation_calculation(
colocation_dict=colocation_dict,
gene_names=gene_names,
overlap_range=overlap_range
)
return runmetadata, notes
def translate_sequence(nt_seq: str):
"""
Uses BioPython to translate a nucleotide sequence to protein, and trims it to the first stop
codon
:param nt_seq: String of the nucleotide sequence
:return aa_seq: String of the trimmed amino acid sequence
"""
# Create a sequence object from the nucleotide sequence
nt_seq_object = Seq(nt_seq)
# Translate the sequence to protein
try:
# Translate the sequence
aa_seq_object = nt_seq_object.translate()
# BioPython cannot translate a sequence with gaps (-)
except TranslationError:
allele_seq = str(nt_seq_object).replace('-', '')
seq = Seq(allele_seq)
aa_seq_object = str(seq.translate())
# Split the string on stop codons, keep only the first part of the split
aa_seq = str(aa_seq_object).split('*', maxsplit=1)[0] + '*'
return str(aa_seq)
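# Example (sequence invented): translate_sequence('ATGGCCTAACCC') translates to 'MA*P'
# and is trimmed at the first stop codon, returning 'MA*'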
def aa_allele_lookup(
aa_seq: str,
gene: str,
aa_allele_path: str,
notes: list):
"""
Read in the amino acid allele file. Search for exact matches to the current sequence
:param aa_seq: String of the amino acid sequence
:param gene: String of the gene name
:param aa_allele_path: String of the absolute path to the folder containing the amino acid
allele files
:param notes: List of notes for the current contig: query_range
:return record.id: Allele identifier corresponding to the sequence matching the aa_seq
if no matches:
:return filtered: Boolean of whether the amino acid sequence passes length thresholds
:return notes: Populated notes
"""
# Set the name of the amino acid allele file by joining the allele folder path to the gene name
aa_allele_file = os.path.join(aa_allele_path, f'{gene}.fasta')
# Iterate through all the alleles in the file
for record in SeqIO.parse(aa_allele_file, 'fasta'):
# If the sequence in the file matches the current sequence, return the allele identifier
if aa_seq == str(record.seq):
return record.id
# If no records match, evaluate whether the aa allele passes necessary length thresholds
filtered, notes = evaluate_translated_allele(
aa_seq=aa_seq,
gene=gene,
notes=notes
)
return filtered, notes
def evaluate_translated_allele(
aa_seq: str,
gene: str,
notes: list,
aa=False):
"""
Evaluate whether an aa sequence passes the necessary length thresholds after trimming at any interior stop codon
:param aa_seq: String of the amino acid sequence to evaluate
:param gene: String of the name of the gene (no allele information) being evaluated
:param notes: List of notes for the current contig: query_range
:param aa: Boolean of whether the query sequence is amino acid. Triggers filtering if sequence doesn't end with a
stop codon
:return filtered: Boolean of whether the amino acid sequence passes length thresholds
:return notes: Populated notes
"""
# Dictionary of minimum acceptable lengths for each of the STEC genes
length_dict = {
'ECs2973': 90,
'ECs2974': 316,
'ECs1205': 316,
'ECs1206': 88
}
filtered = False
if not aa_seq.endswith('*'):
notes.append(f'{gene} trimmed sequence did not end with a stop codon')
if aa:
filtered = True
# Remove all sequence after a stop codon (*)
aa_seq = aa_seq.split('*', maxsplit=1)[0] + '*'
# Evaluate the translated length of the sequence
filtered, notes = evaluate_translated_length(
aa_seq=aa_seq,
length_dict=length_dict,
gene=gene,
notes=notes,
filtered=filtered
)
return filtered, notes
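# Illustrative outcome (assuming evaluate_translated_length enforces the minima above as
# lower bounds): an ECs1206 translation of 90 residues ending in '*' would come back
# unfiltered, while one trimmed to 50 residues would be flagged as filtered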
def update_allele_databases(
query_sequence: SeqIO.parse,
header: str,
filtered: bool,
gene: str,
report_path: str,
allele_path: str,
notes: list,
molecule: str):
"""
Update the appropriate allele file depending on quality filter status and molecule
:param query_sequence: String of the novel allele sequence
:param header: String of the allele name (gene_allele ID)
:param filtered: Boolean of whether the allele has been quality filtered
:param gene: String of the name of the gene
:param report_path: String of the absolute path to the folder into which the reports are to be written
:param allele_path: String of the absolute path to the folder containing the allele database
:param notes: List of notes on the alleles
:param molecule: String of the current molecule. Options are Nucleotide and Amino acid
:return: notes: Updated list of notes
"""
# Create a SeqRecord of the allele using the novel allele name and sequence
new_record = SeqRecord(
seq=Seq(query_sequence),
id=header,
name='',
description=''
)
# Create a string to prepend to allele file names
molecule_str = 'nt' if molecule == 'Nucleotide' else 'aa'
# Set the correct files depending on the filtering status
if not filtered:
new_alleles = os.path.join(report_path, f'{molecule_str}_{gene}_novel_alleles.fasta')
allele_file = os.path.join(allele_path, f'{gene}.fasta')
else:
new_alleles = os.path.join(report_path, f'{molecule_str}_{gene}_filtered_alleles.fasta')
allele_file = os.path.join(allele_path, f'{gene}_filtered.txt')
records = []
# Iterate through all the records in the allele database
if os.path.isfile(allele_file):
for record in SeqIO.parse(allele_file, 'fasta'):
# Append all the records to the list
records.append(record)
# Check to see if the query sequence is novel in the database
if query_sequence not in [str(seq.seq) for seq in records]:
# Append the SeqRecord to the novel alleles file
with open(new_alleles, 'a+', encoding='utf-8') as novel:
SeqIO.write(
sequences=new_record,
handle=novel,
format='fasta'
)
records.append(new_record)
# Overwrite the existing allele database file with the updated list of records
with open(allele_file, 'w', encoding='utf-8') as alleles:
SeqIO.write(
sequences=records,
handle=alleles,
format='fasta'
)
remove_combined_db_files(allele_path=allele_path)
notes.append(f'{molecule} allele {header} is novel')
# Non-novel sequences will have updated notes with the match
else:
for record in records:
if str(query_sequence) == record.seq:
# Append the previous finding to the notes
notes.append(f'{molecule} matches previous result: {record.id}')
# Set the header to the corresponding record.id on a match
header = record.id
return notes, header
def colocation_calculation(
colocation_dict: dict,
gene_names: list,
overlap_range: int):
"""
Determine if gene results are co-located on a contig
:param colocation_dict: Dictionary of contig: {'query_ranges': [query_range],
'target': [allele_id]}
:param gene_names: List of all genes in the analysis
:param overlap_range: Integer of the maximum distance allowed between two separate hits before
they can no longer be considered co-located on a contig
:return overlap_dict: Dictionary of contig:full_range:gene_pair: {'overlap': overlap,
'allele': [allele_identifiers]}
"""
# Initialise a dictionary to store the details of any co-located sequences
overlap_dict = {}
# Iterate over all the contigs with hits
for contig, info_dict in colocation_dict.items():
# Update the overlap dictionary with the contig name as required
if contig not in overlap_dict:
overlap_dict[contig] = {}
# Extract the query range and the allele identifiers from info_dict
query_ranges = info_dict['query_ranges']
targets = info_dict['target']
# Iterate over all the query ranges with hits on the current contig
for query_iterator, query_range in enumerate(query_ranges):
# Create a variable to track whether the current contig:query range combination has
# been added to the overlap dictionary
processed = False
# Extract the name of the current allele from the list of all alleles in the range
current_allele = targets[query_iterator]
            # Create a dictionary mapping each of the other ranges (as a tuple) to its index in the list of ranges
other_ranges = {
tuple(other): j for j, other in enumerate(query_ranges) if other != query_range
}
# Iterate over these other ranges
for other_range, other_iterator in other_ranges.items():
# Calculate whether the current range overlaps with this other range
# e.g. query_range = (100, 500), other_range = (525, 1000), overlap_range = 50
# Check if 500 + 50 >= 525 and 1000 + 50 >= 100
overlap = query_range[1] + overlap_range >= other_range[0] and \
other_range[1] + overlap_range >= query_range[0]
# If these ranges overlap, populate the overlap dictionary
if overlap:
overlap_dict, processed = positive_overlap(
info_dict=info_dict,
other_iterator=other_iterator,
query_range=query_range,
other_range=other_range,
overlap_dict=overlap_dict,
current_allele=current_allele,
contig=contig,
gene_names=gene_names,
overlap=overlap
)
# If the current contig: range was not entered into the overlap dictionary, there were
# either no other hits, or the hits did not overlap
if not processed:
# Create a tuple containing only the current range
tuple_range = tuple(query_range)
# Update the dictionary as required
if tuple_range not in overlap_dict[contig]:
overlap_dict[contig][tuple_range] = {}
# Extract the gene name corresponding to the allele identifier
# e.g. gene = ECs1206 allele = ECs1206_138 will create ECs1206
gene = [gene_name for gene_name in gene_names if gene_name in current_allele][0]
                # Add the gene name to the dictionary, and create the overlap and allele keys
if gene not in overlap_dict[contig][tuple_range]:
overlap_dict[contig][tuple_range][gene] = {
'overlap': False,
'allele': []
}
# Append the current allele identifier to the list of alleles
overlap_dict[contig][tuple_range][gene]['allele'].append(current_allele)
return overlap_dict
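# Illustrative sketch (not part of the original module; the contig name and allele identifiers
# below are hypothetical): the expected shape of the input and output of colocation_calculation
# for two stx2 subunit hits that sit within 50 bp of each other on the same contig.
# >>> colocation_dict = {
# ...     'contig_1': {
# ...         'query_ranges': [(100, 500), (525, 1000)],
# ...         'target': ['ECs1205_1', 'ECs1206_138']
# ...     }
# ... }
# >>> colocation_calculation(
# ...     colocation_dict=colocation_dict,
# ...     gene_names=['ECs1205', 'ECs1206'],
# ...     overlap_range=50
# ... )
# {'contig_1': {(100, 1000): {('ECs1205', 'ECs1206'): {'overlap': True,
# 'allele': ['ECs1205_1', 'ECs1206_138']}}}}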
def positive_overlap(
info_dict: dict,
other_iterator: int,
query_range: range,
other_range: range,
overlap_dict: dict,
current_allele: str,
contig: str,
gene_names: list,
overlap: bool):
"""
Determine the combined range of two overlapping ranges, extract gene names corresponding to
allele names, populate dictionary of range overlaps
:param info_dict: Dictionary of {'query_ranges': [query_range], 'target': [allele_id]}
:param other_iterator: Integer of the iterator corresponding to the current other_range from the
dictionary of other_range: iterator
:param query_range: Range of hit corresponding to current_allele in info_dict
e.g. info_dict['query_ranges'][query_iterator] and info_dict['target'][query_iterator]
:param other_range: Range of hit corresponding to non-current allele in info_dict
e.g. info_dict['query_ranges'][other_iterator] and info_dict['target'][other_iterator]
    :param overlap_dict: Dictionary to be populated with overlap information
e.g. contig:full_range:gene_pair: {'overlap': overlap, 'allele': [allele_identifiers]}
:param current_allele: String of the name of the allele extracted from info_dict
e.g. info_dict['target'][query_iterator]
:param contig: Name of the contig within which the BLAST hits are located
:param gene_names: List of all gene names in the analysis
    :param overlap: Boolean of whether query_range overlaps with other_range
    :return: overlap_dict: Updated dictionary of overlap information
    :return: processed: Boolean indicating that the overlap dictionary was populated
"""
# Extract the name of the other allele from info_dict using the iterator of the other range
other_allele = info_dict['target'][other_iterator]
full_range = calculate_full_range(
query_range=query_range,
other_range=other_range
)
# Update the overlap dictionary with the full range as required
if full_range not in overlap_dict[contig]:
overlap_dict[contig][full_range] = {}
# Create a sorted tuple of the allele names
alleles = tuple(sorted([current_allele, other_allele]))
# Create a set of all the genes with hits in the current overlap
genes = set()
# Iterate over all the alleles
for allele in alleles:
# Add the gene from the list of genes if it is present in the allele
# identifier e.g. gene = ECs1206 allele = ECs1206_138 will add ECs1206
genes.add([gene for gene in gene_names if gene in allele][0])
# Create a tuple of the sorted list of genes present in the set
gene_pair = tuple(sorted(list(genes)))
# Update the dictionary as required
if gene_pair not in overlap_dict[contig][full_range]:
overlap_dict[contig][full_range][gene_pair] = {
'overlap': overlap,
'allele': []
}
# Append the current allele to the overlap dictionary
overlap_dict[contig][full_range][gene_pair]['allele'].append(current_allele)
# Set processed to True to indicate that there was an overlap and that the
# dictionary was populated
processed = True
return overlap_dict, processed
def calculate_full_range(
query_range: range,
other_range: range):
"""
    Determine the combined range spanned by two overlapping ranges
:param query_range: Range of hit corresponding to current_allele
:param other_range: Range of hit corresponding to non-current allele
:return full_range: Tuple of minimum coordinate from both ranges, maximum coordinate from
both ranges
"""
    # Determine the minimum and maximum coordinates of the two ranges
# e.g. query_range = (100, 500), other_range = (525, 1000)
# min_range = 100
min_range = (min(query_range[0], other_range[0]))
# max_range = 1000
max_range = (max(query_range[1], other_range[1]))
# The full range is a tuple of (min_range, max_range)
# full_range = (100, 1000)
full_range = tuple(sorted([min_range, max_range]))
return full_range
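# Illustrative sketch (not part of the original module): calculate_full_range collapses two
# overlapping (or nearly adjacent) hit ranges into their outermost coordinates, regardless of
# the order in which the ranges are supplied.
# >>> calculate_full_range(query_range=(100, 500), other_range=(525, 1000))
# (100, 1000)
# >>> calculate_full_range(query_range=(525, 1000), other_range=(100, 500))
# (100, 1000)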
def evaluate_translated_length(
aa_seq: str,
length_dict: dict,
gene: str,
notes: list,
filtered: bool):
"""
Evaluate whether a translated sequence passes a length filter and starts with a methionine
residue
:param aa_seq: String of the amino acid sequence to evaluate
:param length_dict: Dictionary of minimum acceptable length for each gene in the analysis
:param gene: String of the name of the gene
:param notes: List of notes on the alleles
:param filtered: Boolean of whether the allele has been filtered based on length or content
:return: filtered: Updated filtering boolean
:return: notes: Updated list of notes
"""
# Proper protein sequences must start with a methionine (M)
if not aa_seq.startswith('M'):
filtered = True
notes.append(f'{gene} amino acid sequence does not start with M')
# The length of the sequence must also be greater than the minimum gene-specific length
if len(aa_seq) < length_dict[gene]:
filtered = True
notes.append(
f'{gene} amino acid sequence was {len(aa_seq)} amino acid residues. Minimum allowed '
f'length is {length_dict[gene]} amino acid residues')
return filtered, notes
def generic_evaluate_translated_length(
aa_seq: str,
sequence: str,
gene: str,
notes: list,
filtered: bool,
cutoff=0.95):
"""
Evaluate whether a translated sequence passes a generic length filter and starts with a methionine
residue
:param aa_seq: String of the amino acid sequence to evaluate
:param sequence: String of untrimmed nucleotide sequence
:param gene: String of the name of the gene
:param notes: List of notes on the alleles
:param filtered: Boolean of whether the allele has been filtered based on length or content
:param cutoff: Float of minimum cutoff value to be used for filtering trimmed sequences. Default is 0.95
:return: filtered: Updated filtering boolean
:return: notes: Updated list of notes
"""
# Proper protein sequences must start with a methionine (M)
if not aa_seq.startswith('M'):
filtered = True
notes.append(f'{gene} amino acid sequence does not start with M')
    # The minimum permitted length of a trimmed amino acid allele is a fraction (cutoff, 0.95 by
    # default) of the theoretical length of the translated nucleotide sequence e.g. a 99 bp nt
    # sequence translates to 33 amino acid residues, and 95% of that is 31.35 -> 31 (rounded down)
minimum_length = math.floor(len(sequence) / 3 * cutoff)
aa_seq_length = len(aa_seq)
if aa_seq_length < minimum_length:
filtered = True
notes.append(
            f'{gene} amino acid sequence was trimmed to {aa_seq_length} residues; '
            f'the minimum length allowed is {minimum_length} residues')
return filtered, notes
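# Worked example (values are assumptions for illustration only): a 99 bp untrimmed nucleotide
# sequence corresponds to a theoretical 33 translated residues; with the default cutoff of 0.95,
# the minimum acceptable trimmed length is floor(33 * 0.95) = 31 residues, so a 30 residue
# translation is filtered even though it starts with a methionine.
# >>> filtered, notes = generic_evaluate_translated_length(
# ...     aa_seq='M' + 'A' * 29,
# ...     sequence='ATG' * 33,
# ...     gene='ECs1206',
# ...     notes=[],
# ...     filtered=False
# ... )
# >>> filtered
# True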
def find_next_allele(
gene: str,
allele_path: str,
extension='.fasta'):
"""
    Determine the number to assign to a novel allele by locating the last allele in the database
:param gene: Name of the current gene being examined
:param allele_path: Name and absolute path to folder containing allele files
:param extension: String of the file extension. Default is .fasta
    :return: last_id + 1: Number to assign to the next (novel) allele in the current database
"""
# Find the allele database file
allele_file = os.path.join(allele_path, f'{gene}{extension}')
# Initialise a variable to store the name of the last allele in the database file
last_id = int()
records = []
if os.path.isfile(allele_file):
# Iterate through all the records in the allele database
for record in SeqIO.parse(allele_file, 'fasta'):
# Update the last_id variable
last_id = int(record.id.split('_')[-1])
records.append(record)
else:
last_id = 0
        # Make it clear that these are novel alleles by starting at 1000000
if last_id < 1000000:
last_id = 999999
return last_id + 1
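# Illustrative sketch (not part of the original module; the path below is hypothetical): when no
# allele database file exists yet for the gene, the local numbering scheme takes over and the
# first novel allele is numbered 1000000.
# >>> find_next_allele(gene='ECs1206', allele_path='/path/with/no/databases')
# 1000000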
def remove_combined_db_files(allele_path: str):
"""
Remove all the combined gene files used in BLAST analyses
:param allele_path: String of the absolute path to the folder containing the alleles
"""
# Find all the files in the directory with the word combined in the name
combined_files = glob(os.path.join(allele_path, 'combined*'))
# Remove each of the files
for file in combined_files:
os.remove(file)
def create_nt_allele_comprehension(
runmetadata: MetadataObject,
gene_names: list):
"""
Create gene: nucleotide allele ID comprehensions for each contig: range combination with hits
:param runmetadata: MetadataObject with list of GenObjects for each query
:param gene_names: List of all gene names in the analysis
:return: allele_comprehension: nucleotide allele comprehension. allele_comprehension[contig][full_range] =
{gene:allele}
"""
logging.info('Determining nucleotide allele profiles')
# Initialise an empty allele comprehension dictionary
allele_comprehension = {}
# Iterate through all the samples
for sample in runmetadata.samples:
# Extract hit information from the overlap dictionary
for contig, range_dict in sample.alleles.overlap_dict.items():
# Update the allele comprehension dictionary with the contig key as required
if contig not in allele_comprehension:
allele_comprehension[contig] = {}
# Iterate through each query range with a hit in the current contig
for query_range, gene_dict in range_dict.items():
# Update the dictionary with the query range key as required
if query_range not in allele_comprehension[contig]:
allele_comprehension[contig][query_range] = {}
# Iterate over each gene with a hit within this contig:query_range combination
for gene_pair, info_dict in gene_dict.items():
# If the gene_pair variable is a string (instead of a tuple), there is only a single gene present
if isinstance(gene_pair, str):
# Extract the gene_allele ID from the dictionary
corresponding_allele = info_dict['allele'][0]
# Remove the gene name (and an underscore) from the corresponding_allele variable
# (leaving only the allele ID)
allele_number = corresponding_allele.replace(f'{gene_pair}_', '')
# Update the allele comprehension dictionary with the gene name: alleleID
allele_comprehension[contig][query_range].update(
{gene_pair: allele_number}
)
# Determine which gene(s) are missing from this contig:query_range
missing = [other_gene for other_gene in gene_names if other_gene != gene_pair]
# Populate the allele comprehension dictionary with the missing genes
for missing_gene in missing:
allele_comprehension[contig][query_range].update({missing_gene: '0'})
# A tuple of gene names indicates that multiple co-located genes are present in this
# contig:query_range combination
else:
# Iterate over each gene in the gene_pair tuple
for i, gene_name in enumerate(gene_pair):
# Extract the gene_alleleID from the dictionary for this gene
corresponding_allele = info_dict['allele'][i]
# Remove the gene name information from the corresponding_allele variable
if gene_name in corresponding_allele:
allele_number = corresponding_allele.replace(f'{gene_name}_', '')
# Update the dictionary with the new gene: allele number for the sample
allele_comprehension[contig][query_range].update(
{gene_name: allele_number}
)
else:
# Determine which gene(s) are not currently being examined
corresponding_gene = [
other_gene for other_gene in gene_names if other_gene != gene_name
][0]
allele_number = corresponding_allele.replace(f'{corresponding_gene}_', '')
# Update the dictionary with the new gene: allele number for the sample
allele_comprehension[contig][query_range].update(
{corresponding_gene: allele_number}
)
# If the allele_comprehension dictionary exists, it doesn't need to be further populated
if allele_comprehension:
continue
# Otherwise iterate through the targetsequence dictionary
for contig, range_dict in sample.alleles.targetsequence.items():
# Update the allele comprehension dictionary as required
if contig not in allele_comprehension:
allele_comprehension[contig] = {}
# Iterate through all genes in the analysis
for gene in gene_names:
# Set an 'empty' range as (0, 0)
full_range = (0, 0)
# Add the range to the dictionary as required
if full_range not in allele_comprehension[contig]:
allele_comprehension[contig][full_range] = {}
# Update the dictionary with the negative result
allele_comprehension[contig][full_range].update(
{gene: '0'}
)
return allele_comprehension
def create_aa_allele_comprehension(
runmetadata: MetadataObject,
gene_names: list):
"""
Create gene: amino acid allele ID comprehensions for each contig: range combination with hits
:param runmetadata: MetadataObject with list of GenObjects for each query
:param gene_names: List of all gene names in the analysis
:return: allele_comprehension: amino acid allele comprehension. allele_comprehension[contig][full_range] =
{gene:allele}
"""
logging.info('Determining amino acid allele profiles')
# Initialise a dictionary to store contig:query_range gene:alleleID results
allele_comprehension = {}
# Iterate through all the samples
for sample in runmetadata.samples:
# Iterate through all the contigs in the targetsequence dictionary
for contig, range_dict in sample.alleles.targetsequence.items():
# Update the dictionary as required
if contig not in allele_comprehension:
allele_comprehension[contig] = {}
# If the current contig is not in the overlap dictionary, populate the allele comprehension with
# negative values
if contig not in sample.alleles.overlap_dict:
# Iterate over every gene in the analysis
for gene in gene_names:
# Set the 'empty' value to (0, 0)
full_range = (0, 0)
# Update the dictionary with the negative values
if full_range not in allele_comprehension[contig]:
allele_comprehension[contig][full_range] = {}
allele_comprehension[contig][full_range].update(
{gene: '0'}
)
# The dictionary has been populated, so continue
continue
# Extract all the ranges with hits in the overlap dictionary
for full_range in sample.alleles.overlap_dict[contig]:
# Extract the query ranges with hits on the current contig in the targetsequence dictionary
for query_range, gene_dict in range_dict.items():
# Determine if these two ranges have an overlap
overlap = query_range[1] >= full_range[0] and full_range[1] >= query_range[0]
# If they do not overlap, they are not the same
if not overlap:
continue
# Update the dictionary as required
if full_range not in allele_comprehension[contig]:
allele_comprehension[contig][full_range] = {}
# Create a list to store all genes in the analysis that do not have hits in the range
missing_genes = []
# Iterate over all genes in the analysis
for gene in gene_names:
# If the gene is not in the gene dictionary contained in the targetsequence dictionary,
# add it to the list of missing genes
if gene not in gene_dict:
missing_genes.append(gene)
# Otherwise, update the dictionary
else:
# Extract the name of the gene:alleleID from gene_dict in the targetsequence dictionary
full_allele = gene_dict[gene]['aa']['allele']
# Remove the gene name information from the full_allele variable
allele_number = full_allele.replace(f'{gene}_', '')
# Add the gene:alleleID to the dictionary
allele_comprehension[contig][full_range].update(
{gene: allele_number}
)
# Add any missing genes to the dictionary with negative values
for gene in missing_genes:
# Ensure that the gene isn't already present in the dictionary
if gene not in allele_comprehension[contig][full_range]:
allele_comprehension[contig][full_range].update(
{gene: '0'}
)
return allele_comprehension
def create_frozen_allele_comprehension(allele_comprehension: dict):
"""
Freeze allele comprehension dictionaries
:param allele_comprehension: Dictionary of contig:full_range: {gene:allele}
:return: frozen_allele_comprehension: Dictionary of contig:query_range: json.dumps({gene:allele}, sort_keys=True)
"""
# Initialise a dictionary to store the frozen allele comprehensions
frozen_allele_comprehension = {}
# Iterate over all the contigs with hits
for contig, query_dict in allele_comprehension.items():
# Update the frozen allele dictionary as required
if contig not in frozen_allele_comprehension:
frozen_allele_comprehension[contig] = {}
# Iterate over all the ranges and allele comprehensions on the current contig with hits
for query_range, allele_dict in query_dict.items():
# Freeze the allele comprehension
frozen_allele_dict = json.dumps(allele_dict, sort_keys=True)
# Update the dictionary with the range and the frozen allele string
if query_range not in frozen_allele_comprehension[contig]:
frozen_allele_comprehension[contig][query_range] = frozen_allele_dict
return frozen_allele_comprehension
def extract_novel_alleles(
sample: MetadataObject,
gene: str,
genome_query: bool,
amino_acid: bool,
allele_path: str,
report_path: str,
cutoff=75):
"""
Extract the sequence of novel alleles from samples that do not have a 100% match
:param sample: MetadataObject with list of GenObjects for each query
:param gene: Name of current gene
:param genome_query: Boolean of whether the allele or the genome are the query
:param amino_acid: Variable indicating whether the current analyses are on DNA or
amino acid sequences
:param allele_path: Name and absolute path to folder containing allele files
:param report_path: Name and absolute path to folder in which reports are to be created
:param cutoff: The minimum percent identity cutoff to allow when considering the presence of a
sequence in a query
:return: sample: Updated sample
:return: novel_allele: Name of novel alleles discovered
:return: query_sequence: Sequence of novel alleles discovered
"""
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(sample.alleles.blast_report, encoding='utf-8'),
dialect='excel-tab')
# Initialise the best hit value of 0
best_hit = 0
# Initialise strings to store the name and the sequence of novel alleles
query_sequence = str()
novel_allele = str()
# Iterate through all the BLAST hits
for row in blastdict:
# Extract the target id with the appropriate key depending on whether genome files
# are the query or the subject
target_id = row['query_id'] if not genome_query else row['subject_id']
# Ensure that the gene name is present in the gene name + allele combination
if gene in target_id:
# Create a variable to store the value for percent identity, so it is easier to call
perc_id = float(row['percent_match'])
                    # See if the percent identity for the current match is better than the
                    # previous best match, and is above the minimum cutoff threshold
if perc_id > best_hit and perc_id >= cutoff:
# Set the start and end variables depending on whether genomes are the query
target_start = row['query_start'] if not genome_query else row['subject_start']
target_end = row['query_end'] if not genome_query else row['subject_end']
target_seq = row['query_sequence']
# Determine if the orientation of the sequence is reversed compared to the reference
if int(target_end) < int(target_start) and not amino_acid:
# Create a sequence object using BioPython
seq = Seq(target_seq)
# Calculate the reverse complement of the sequence
query_sequence = str(seq.reverse_complement())
# If the sequence is not reversed, use the sequence as it is in the output
else:
query_sequence = target_seq
best_hit = perc_id
# If a query sequence was extracted, use it to update the allele database
if query_sequence:
novel_allele = update_allele_database(
gene=gene,
query_sequence=query_sequence,
allele_path=allele_path,
report_path=report_path,
amino_acid=amino_acid,
)
return sample, novel_allele, query_sequence
def update_allele_database(
gene: str,
query_sequence: str,
allele_path: str,
report_path: str,
amino_acid: bool):
"""
Update the allele database with the novel allele extracted above
:param gene: Name of the current gene being examined
:param query_sequence: Sequence of the novel allele
:param allele_path: Name and absolute path to folder containing allele files
:param report_path: Name and absolute path to folder in which reports are to be created
:param amino_acid: Variable indicating whether the current analyses are on DNA or
amino acid sequences
:return: novel_allele: Name of the novel allele entered into the database
"""
# Find the allele database file
allele_file = glob(os.path.join(allele_path, f'{gene}*.*fa*'))[0]
# Set the appropriate molecule type based on the current analysis
molecule = 'nt' if not amino_acid else 'aa'
# Set the name of the novel allele file in the report path
new_alleles = os.path.join(report_path, f'{molecule}_{gene}_novel_alleles.fasta')
# Initialise a variable to store the name of the last allele in the database file
last_id = str()
# Create a list to store all the allele records in the database
records = []
# Iterate through all the records in the allele database
for record in SeqIO.parse(allele_file, 'fasta'):
# Add the records to the list
records.append(record)
# Update the last_id variable
last_id = record.id
# Try to separate the gene name from the allele e.g. MutS_1
try:
_, allele = last_id.rsplit('_', 1)
# If there is no allele, set the allele to 1
except ValueError:
allele = 1
    # Typecast the variable to an integer
allele = int(allele)
    # If the allele number corresponds to an Enterobase number, use our local numbering scheme instead
if allele < 1000000:
allele = 999999
# Name the novel allele as the gene name _ allele number + 1
novel_allele = f'{gene}_{int(allele) + 1}'
# Create a SeqRecord of the allele using the novel allele name and sequence
new_record = SeqRecord(
seq=Seq(query_sequence),
id=novel_allele,
name='',
description=''
)
# Append the SeqRecord to the novel alleles file
with open(new_alleles, 'a+', encoding='utf-8') as novel:
SeqIO.write(
sequences=new_record,
handle=novel,
format='fasta'
)
# Add the novel allele record to the list of all records
records.append(new_record)
# Overwrite the existing allele database file with the updated list of records
with open(allele_file, 'w', encoding='utf-8') as alleles:
SeqIO.write(
sequences=records,
handle=alleles,
format='fasta'
)
return novel_allele
def translate(runmetadata: MetadataObject):
"""
Use BioPython to translate DNA to amino acid
:param runmetadata: MetadataObject with list of GenObjects for each query
:return: Updated MetadataObject
"""
logging.info('Translating allele sequences to amino acid')
for sample in runmetadata.samples:
# Initialise the dictionary to store the translated sequence
sample.alleles.nt_alleles_translated = {}
for allele, allele_sequence_list in sample.alleles.targetsequence.items():
for allele_sequence in allele_sequence_list:
# Create a sequence object using BioPython
seq = Seq(allele_sequence)
try:
# Translate the sequence
aa_seq = str(seq.translate())
# BioPython cannot translate sequences with gaps (-)
except TranslationError:
# Remove all - from the sequence
allele_seq = allele_sequence.replace('-', '')
seq = Seq(allele_seq)
aa_seq = str(seq.translate())
# Ensure that the allele name exists (isn't an empty string) before adding
# allele name: translated sequence to the dictionary
if allele:
sample.alleles.nt_alleles_translated[allele] = aa_seq
return runmetadata
def match_profile(
profile_data: dict,
frozen_allele_comprehension: dict,
report_path: str,
profile_file: str,
genes: list,
allele_comprehension: dict,
molecule: str):
"""
Match current profiles to any previously created profiles
:param profile_data: Dictionary of seq_type: {gene name: allele ID}
    :param frozen_allele_comprehension: Dictionary of contig:query_range: json.dumps({gene name: allele ID}, sort_keys=True)
:param report_path: Name and absolute path to folder in which reports are to be created
:param profile_file: Name and path of file containing reduced profiles
:param genes: List of all genes in the analysis
:param allele_comprehension: Dictionary of contig:full_range: {gene:allele}
:param molecule: String of the current molecule being processed. Options are "aa" and "nt"
    :return: profile_matches: Dictionary of contig:query_range:seq_type_match
    :return: frozen_profiles: Updated dictionary of json.dumps({gene name: allele ID}, sort_keys=True): seq_type
"""
    # If the profile_data dictionary was not populated in the read_profiles methods, there is
    # nothing to match; return empty results so the calling code can still unpack the tuple
    if not profile_data:
        return {}, {}
logging.info('Matching new %s profiles against profile file', molecule)
profile_matches = {}
# Extract all the profiles from the profile file (as a frozen string)
frozen_profiles = freeze_profiles(
profile_data=profile_data
)
# Iterate over all the contigs with hits
for contig, query_dict in frozen_allele_comprehension.items():
# Iterate over the query ranges with hits on the current contig
for query_range, frozen_allele_dict in query_dict.items():
try:
# Extract the samples that match this profile
seq_type_match = frozen_profiles[frozen_allele_dict]
# Update the dictionary with the matching samples
if contig not in profile_matches:
profile_matches[contig] = {}
if query_range not in profile_matches[contig]:
profile_matches[contig][query_range] = seq_type_match
# The profile will not necessarily match any of the profiles found in the analysis
except KeyError:
if contig not in profile_matches:
profile_matches[contig] = {}
# Update the profile file with this novel profile
profile_matches[contig][query_range] = update_profiles(
profile_file=profile_file,
report_path=report_path,
genes=genes,
allele_dict=allele_comprehension[contig][query_range],
molecule=molecule
)
# Update the profile_data dictionary with the new sequence type
profile_data[profile_matches[contig][query_range]] = allele_comprehension[contig][query_range]
frozen_profiles = freeze_profiles(
profile_data=profile_data
)
return profile_matches, frozen_profiles
def freeze_profiles(profile_data: dict):
"""
Freeze profiles, so that the frozen {gene:allele} dictionary can be used as the key and the corresponding sequence
type as the value
:param profile_data: Dictionary of all profiles in seq_type: {gene name: allele ID} format
:return: frozen_profiles: Dictionary of json.dumps({gene name: allele ID}, sort_keys=True): seq_type
"""
# Initialise a dictionary to store the frozen profiles information
frozen_profiles = {}
# Iterate over all the sequence type: {gene name: allele ID} pairs in the dictionary
for seq_type, allele_comprehension in profile_data.items():
# Freeze the allele comprehension
frozen_allele_comprehension = json.dumps(allele_comprehension, sort_keys=True)
# Populate the dictionary with frozen_allele_comprehension: seq_type
frozen_profiles[frozen_allele_comprehension] = seq_type
return frozen_profiles
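# Illustrative sketch (not part of the original module): freezing a profile converts the
# unhashable {gene: allele} dictionary into a canonical, sorted JSON string, which can then be
# used as a dictionary key to look up the corresponding sequence type.
# >>> freeze_profiles(profile_data={'22': {'ECs1206': '3', 'ECs1205': '2'}})
# {'{"ECs1205": "2", "ECs1206": "3"}': '22'}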
def update_profiles(
profile_file: str,
report_path: str,
genes: list,
allele_dict: dict,
molecule: str):
"""
Run methods to add novel profiles to the profile file. Determine the sequence type to use, and update the file
:param profile_file: Name and path of file containing reduced profiles
:param report_path: Name and absolute path to folder in which reports are to be created
:param genes: List of all genes in the analysis
:param allele_dict: Dictionary of a single allele comprehension. Extracted from
allele_comprehension[contig][query_range] = {gene: allele}
:param molecule: String of the current molecule being processed. Options are "aa" and "nt"
:return: next_seq_type: Integer of the sequence type assigned to the novel profile
"""
# Extract the sequence type to use for the novel profile
next_seq_type = return_next_seq_type(
profile_file=profile_file
)
# Update the profile file
created = update_profile_file(
profile_file=profile_file,
next_seq_type=next_seq_type,
allele_dict=allele_dict,
genes=genes,
report_path=report_path,
molecule=molecule
)
if not created:
next_seq_type = 'N/A'
return next_seq_type
def return_next_seq_type(profile_file: str):
"""
Parse the profile file, and return the value for the next sequence type to be used. Local profiles will start at
1000000 in order to be distinct from Enterobase profiles
:param profile_file: Name and path of file containing reduced profiles
:return: last_seq_type + 1: Integer of the sequence type to be assigned to the novel profile
"""
# Open the profile file
with open(profile_file, 'r', encoding='utf-8') as profile:
# Create a list of all the lines in the file
lines = profile.read().splitlines()
# Extract the last value from the list of lines
last_line = lines[-1]
# Split the line on tabs, and set the last_seq_type variable to the first entry e.g. 22\t3\t2\n yields a
# sequence type of 22
last_seq_type = last_line.split('\t')[0]
    # Typecast the variable to an integer
int_last_seq_type = int(last_seq_type)
# If the sequence type corresponds to an Enterobase number, use our local numbering scheme instead
if int_last_seq_type < 1000000:
int_last_seq_type = 999999
# Return the last sequence type + 1 to give the next sequence type
return int_last_seq_type + 1
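# Illustrative sketch (not part of the original module; the temporary profile file is an
# assumption used purely to demonstrate the numbering): an Enterobase-range sequence type on the
# last line (22) is bumped to the local numbering scheme, so the next sequence type is 1000000.
# >>> import os, tempfile
# >>> handle = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
# >>> _ = handle.write('ST\tECs1205\tECs1206\n22\t3\t2\n')
# >>> handle.close()
# >>> return_next_seq_type(profile_file=handle.name)
# 1000000
# >>> os.remove(handle.name)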
def update_profile_file(
profile_file: str,
next_seq_type: int,
allele_dict: dict,
genes: list,
report_path: str,
molecule: str):
"""
Update the profile file with novel profile. Additionally, either create or update the novel_profiles.txt file
with the same profile
:param profile_file: Name and path of file containing reduced profiles
:param next_seq_type: Integer of the sequence type to be assigned to the novel profile
:param allele_dict: Dictionary of a single allele comprehension. Extracted from
allele_comprehension[contig][query_range] = {gene: allele}
:param genes: List of all genes in the analysis
:param report_path: Name and absolute path to folder in which reports are to be created
:param molecule: String of the current molecule being processed. Options are "aa" and "nt"
:return bool: Boolean of whether the profile could be created or not
"""
# Initialise a string to store the profile information with the novel sequence type
seq_type_str = f'{next_seq_type}'
# Initialise a header to store 'ST gene1 gene2.......geneX\n'
header = 'ST'
# Iterate over all the genes in the analysis
for gene in genes:
# Extract the allele ID for each gene in the analysis
allele = allele_dict[gene]
# If the allele has been filtered return False, as a sequence type should not exist for filtered alleles
if not allele:
return False
# Check if the gene name is in the allele
if gene in allele:
# Extract the allele number
allele = allele.split('_')[-1]
# Update the header with the gene
header += f'\t{gene}'
# Update the profile string with the allele ID
seq_type_str += f'\t{allele}'
# Open the profile file (to update) and write the novel profile
with open(profile_file, 'a+', encoding='utf-8') as profile:
profile.write(seq_type_str + '\n')
# Set the name of the file containing novel profiles using the molecule variable ('aa' or 'nt')
novel_profile_file = os.path.join(report_path, f'{molecule}_novel_profiles.txt')
# Check to see if the novel profile file exists
if not os.path.isfile(novel_profile_file):
# If it does not exist, create it, and write the header line before the novel profile
with open(novel_profile_file, 'w', encoding='utf-8') as novel_profile:
novel_profile.write(header + '\n')
novel_profile.write(seq_type_str + '\n')
# Otherwise, update the existing file with the novel profile
else:
with open(novel_profile_file, 'a+', encoding='utf-8') as novel_profile:
novel_profile.write(seq_type_str + '\n')
return True
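# Illustrative sketch (not part of the original module; file paths, sequence type, and allele
# numbers are hypothetical): for a novel profile with sequence type 1000000 and allele numbers
# 12 (ECs1205) and 38 (ECs1206), update_profile_file appends the tab-separated line
# '1000000\t12\t38\n' to the profile file, and writes the same line to
# <report_path>/nt_novel_profiles.txt, adding an 'ST\tECs1205\tECs1206\n' header if that report
# is being created for the first time.
# >>> update_profile_file(
# ...     profile_file='profile.txt',
# ...     next_seq_type=1000000,
# ...     allele_dict={'ECs1205': '12', 'ECs1206': '38'},
# ...     genes=['ECs1205', 'ECs1206'],
# ...     report_path='reports',
# ...     molecule='nt'
# ... )
# True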
def create_stec_report(
runmetadata: MetadataObject,
nt_profile_matches: dict,
nt_alleles: dict,
aa_profile_matches: dict,
aa_alleles: dict,
report_file: str,
gene_names: list,
aa_profile_path: str,
        notes: dict):
"""
Create a STEC-specific report including the allele matches for each gene and sequence type for both nucleotide and
amino acid sequence information
:param runmetadata: MetadataObject with list of GenObjects for each query
:param nt_profile_matches: Dictionary of contig:query_range:nucleotide seq_type_match
    :param nt_alleles: Dictionary of nucleotide contig:full_range: {gene:allele}
:param aa_profile_matches: Dictionary of contig:query_range:amino acid seq_type_match
:param aa_alleles: Dictionary of amino acid contig:full_range: {gene:allele}
:param report_file: String of the name and path of the report file
:param gene_names: List of all gene names in the analysis
:param aa_profile_path: String of the absolute path of the folder in which the amino acid profile file is located
    :param notes: Dictionary of contig: query_range: list of notes on the alleles
"""
logging.info('Creating report')
# Set the appropriate order for the genes in the report (stx1 genes are not in numerical order)
gene_order = {
'stx1': ['ECs2974', 'ECs2973'],
'stx2': ['ECs1205', 'ECs1206']
}
# Create a list to store the ordered genes
ordered_genes = []
# Set the ordered genes according to the genes used in the current analysis (stx1 or stx2)
for _, gene_list in gene_order.items():
# If the sorted list matches the list of genes in the analysis, use the unsorted list as the gene order
if sorted(gene_list) == gene_names:
ordered_genes = gene_list
# Create a header for the report. Includes which alleles are present and the sequence type for both the nucleotide
# and amino acid sequences of the query and notes
header = f'Sample\tnt_{ordered_genes[0]}\tnt_{ordered_genes[1]}\tnt_seq_type\t' \
f'aa_{ordered_genes[0]}\taa_{ordered_genes[1]}\taa_seq_type\tnotes\n'
# Create a string to store the query information
data = str()
# Iterate over the samples
for sample in runmetadata.samples:
# Iterate over all the contigs that had hits
for contig, range_dict in nt_profile_matches.items():
# Iterate over the ranges: nucleotide profiles that had hits on this contig
for query_range, nt_profile in range_dict.items():
# Update the data string with the sample name
data += f'{sample.name}\t'
# Extract the corresponding amino acid profile from aa_profile_matches
aa_profile = aa_profile_matches[contig][query_range]
# Extract the allele dictionaries ({gene name: allele ID}) using the contig and query range
nt_allele_dict = nt_alleles[contig][query_range]
aa_allele_dict = aa_alleles[contig][query_range]
# Iterate over the genes in the analysis to extract their corresponding nucleotide alleles
for gene in ordered_genes:
# Update the string with the nucleotide allele ID
data += f'{nt_allele_dict[gene]}\t'
# Update the string with the nucleotide sequence type
data += f'{nt_profile}\t'
# Iterate over the genes in the analysis to extract their corresponding amino acid alleles
for gene in ordered_genes:
# Update the string with the amino acid allele ID
data += f'{aa_allele_dict[gene]}\t'
# Update the string with the amino acid sequence type
data += f'{aa_profile}\t'
# Create a list to store sample:contig:query_range-specific notes
note_list = []
# Determine if there are already notes for this contig in the notes dictionary
if contig in notes:
# Determine if there are already notes for this contig: range in the notes dictionary
if query_range in notes[contig]:
# Update the profile linking file. Use notes in the notes dictionary
note_list = update_profile_link_file(
nt_seq_type=nt_profile,
aa_seq_type=aa_profile,
aa_profile_path=aa_profile_path,
note=notes[contig][query_range]
)
# If there are no notes for the contig:range, create notes from scratch
else:
# Update the profile linking file. Use notes in the notes_list list
note_list = update_profile_link_file(
nt_seq_type=nt_profile,
aa_seq_type=aa_profile,
aa_profile_path=aa_profile_path,
note=note_list
)
# If there are no notes for the contig, create notes from scratch
else:
# Update the profile linking file. Use notes in the notes_list list
note_list = update_profile_link_file(
nt_seq_type=nt_profile,
aa_seq_type=aa_profile,
aa_profile_path=aa_profile_path,
note=note_list
)
# Join all the notes from the list with semicolons
note_str = '; '.join(note_list)
# Update the data string with the notes
data += f'{note_str}'
# Add a newline to the data string
data += '\n'
# If there were no hits for the sample, add negative values to the data string
if not data:
data = f'{sample.name}\t0\t0\t1\t0\t0\t1\n'
# If the report file does not already exist, write the header and data strings
if not os.path.isfile(report_file):
with open(report_file, 'w', encoding='utf-8') as report:
report.write(header)
report.write(data)
# If the report already exists, write only the data string
else:
with open(report_file, 'a+', encoding='utf-8') as report:
report.write(data)
def update_profile_link_file(
nt_seq_type: str,
aa_seq_type: str,
note: list,
aa_profile_path: str):
"""
Update the file linking amino acid sequence type to the (multiple) corresponding nucleotide sequence type(s)
:param nt_seq_type: String of the nucleotide sequence type
:param aa_seq_type: String of the amino acid sequence type
:param note: List of notes on the alleles
:param aa_profile_path: String of the absolute path of the folder in which the amino acid profile file is located
    :return: note: Updated list of notes
"""
# Set the name of the link file
link_file = os.path.join(aa_profile_path, 'aa_nt_profile_links.tsv')
# Create a dictionary of nucleotide sequence type matches
nt_match = {}
# Initialise a boolean to track whether the amino acid sequence type is already present in the profile link file
aa_match = False
# Initialise a dictionary to store the aa_seq_type: nt_seq_type(s)
links = {}
# Initialise a list of all amino acid sequence types present the file
records = []
# Open the profile link file to read in the contents
with open(link_file, 'r', encoding='utf-8') as profile_link:
for line in profile_link:
# Split the amino acid sequence type from the nucleotide sequence type(s)
            # e.g. '1\t1' or '116\t116;125;187;39973;92286;1000005'
aa_seq, nt_seq = line.split('\t')
# Check to see if the extracted amino acid sequence type matches the sequence type of the sample
if aa_seq_type == aa_seq:
# Set the match boolean to True (there is a match)
aa_match = True
# Check to see if the nucleotide sequence type of sample is in the semicolon-separated list of
# nucleotide sequence types corresponding to the amino acid sequence type
if nt_seq_type in nt_seq.rstrip().split(';'):
# Update the nucleotide sequence type match dictionary
nt_match[aa_seq] = nt_seq.rstrip()
# Update the link dictionary
links[aa_seq] = nt_seq.rstrip()
# Add the amino acid sequence type to the list
records.append(aa_seq)
# Check if the amino acid of the sample matched a previous sequence type
if aa_match:
        # If the sample's nucleotide sequence type is not yet linked to this amino acid sequence type
if not nt_match:
# Append the nucleotide sequence type to the string of nucleotide sequence type matches
links[aa_seq_type] += f';{nt_seq_type}'
# Update the note
note.append(f'Novel nt_seq_type {nt_seq_type} links to aa_seq type {aa_seq_type}')
# If no match, this is a novel amino acid sequence type
else:
# Update the link dictionary novel amino acid sequence type: novel nucleotide sequence type
links[aa_seq_type] = nt_seq_type
# Add the novel sequence type to the list
records.append(aa_seq_type)
# Update the notes
note.append(f'Novel nt_seq_type {nt_seq_type}, and aa_seq_type {aa_seq_type}')
# Overwrite the profile link file with the updated links
with open(link_file, 'w', encoding='utf-8') as profile_link:
for record in records:
profile_link.write(f'{record}\t{links[record]}\n')
return note
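# Illustrative sketch (not part of the original module): each line of aa_nt_profile_links.tsv
# maps a single amino acid sequence type to the semicolon-separated list of nucleotide sequence
# types observed for it, e.g.
#     '1\t1'
#     '116\t116;125;187;39973;92286;1000005'
# If a novel nucleotide sequence type (say 1000006, an assumption for illustration) is found for
# the existing amino acid sequence type 116, the second line is rewritten as
# '116\t116;125;187;39973;92286;1000005;1000006' and a corresponding note is appended; a novel
# amino acid sequence type instead gains a brand-new line in the file.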
def split_alleles(
allele_files: list,
output_path: str):
"""
Split FASTA files into individual sequences
:param allele_files: List of absolute path to FASTA-formatted allele sequence files
:param output_path: String of the absolute path into which the individual sequence files are to be written
"""
# Create the output path if it doesn't already exist
make_path(inpath=output_path)
# Ensure that the path could be created
if not os.path.isdir(output_path):
logging.error('Could not create desired output folder: %s', output_path)
raise SystemExit
# Iterate over all the allele files
for allele_file in allele_files:
# Use SeqIO to load all the records in the file
for record in SeqIO.parse(allele_file, 'fasta'):
# Set the name of the file to be the FASTA header
output_file = os.path.join(output_path, f'{record.id}.fasta')
# Write the record to the new file
with open(output_file, 'w', encoding='utf-8') as output:
SeqIO.write(record, output, 'fasta')
def parse_aa_blast(
runmetadata: MetadataObject,
extended_fieldnames: list,
fieldnames: list,
gene_names: list,
notes: list,
aa_allele_path: str,
report_path: str,
cutoff: int):
"""
Parse amino acid BLAST results
:param runmetadata: MetadataObject with list of GenObjects for each query
:param extended_fieldnames: List of the BLAST fields used, as well as the additional percent
match in index 14
:param fieldnames: List of the BLAST fields used
:param gene_names: List of all gene names in the analysis
:param notes: List of sample-specific notes
:param aa_allele_path: String of the absolute path to the folder containing amino acid allele files
    :param report_path: String of the absolute path to the folder into which reports are to be written
:param cutoff: Integer of the minimum percent identity between query and subject sequence.
Default is 90
:return: runmetadata: Updated MetadataObject
:return: filtered: Boolean of whether the sample fails quality/length checks
:return: notes: Updated list of sample-specific notes
"""
logging.info('Parsing BLAST outputs')
# Initialise a boolean to track if the sequence fails checks
filtered = False
for sample in runmetadata.samples:
# Initialise GenObjects as required
sample.alleles.blastlist = []
sample.alleles.targetsequence = {}
# Read the BLAST outputs into a dictionary
blastdict = create_blast_dict(
sample=sample,
extended_fieldnames=extended_fieldnames
)
# Initialise a boolean to track whether this contig:query_range has already been processed
processed = False
# Go through each BLAST result
for row in blastdict:
# Ignore the headers
if row['query_id'].startswith(fieldnames[0]):
continue
# Create variables to reduce extra typing and for extra clarity
target_id = row['subject_id']
percent_id = float(row['percent_match'])
# If the match is perfect
if percent_id == 100:
# Add the name of the matching allele to the list
sample.alleles.blastlist.append(target_id)
# Update the processed boolean to indicate that this region has been processed
processed = True
# If the match is imperfect, but greater than the cutoff
elif cutoff < percent_id < 100 and not processed:
# Determine which gene is being processed by finding the match of the genes against the allele
gene = [gene for gene in gene_names if gene in target_id][0]
query_seq = row['query_sequence']
# Evaluate the sequence for length, as well as required start/stop codons
filtered, notes = evaluate_translated_allele(
aa_seq=query_seq,
gene=gene,
notes=notes,
aa=True
)
# Find the next available allele identifier in the database
aa_allele_id = find_next_allele(
gene=gene,
allele_path=aa_allele_path
)
# Set the name of the allele as gene_alleleID
aa_allele = f'{gene}_{aa_allele_id}'
# Update the allele database with the novel allele
notes, aa_allele = update_allele_databases(
query_sequence=query_seq,
header=aa_allele,
filtered=filtered,
gene=gene,
report_path=report_path,
allele_path=aa_allele_path,
notes=notes,
molecule='Amino acid'
)
# If the allele passes the necessary checks, update the list of results
if not filtered:
sample.alleles.blastlist.append(aa_allele)
# Update the processed boolean
processed = True
return runmetadata, filtered, notes
def analyse_aa_alleles(
runmetadata: MetadataObject,
gene_names: list,
notes: list):
"""
Analyse supplied amino acid alleles to ensure that they pass quality checks
:param runmetadata: MetadataObject with list of GenObjects for each query
:param gene_names: List of all gene names in the analysis
:param notes: List of sample-specific notes
:return: runmetadata: Updated MetadataObject
:return: notes: Updated list of sample-specific notes
"""
# Iterate through all the samples
for sample in runmetadata.samples:
# Iterate over all the records in the file (should only be one, as these files must be split with the
# split_alleles function)
for record in SeqIO.parse(sample.general.bestassemblyfile, 'fasta'):
# Determine to which gene the allele corresponds
gene = [gene for gene in gene_names if gene in record.id][0]
# Perform content/length checks
filtered, notes = evaluate_translated_allele(
aa_seq=record.seq,
gene=gene,
notes=notes,
aa=True
)
return runmetadata, notes
def report_aa_alleles(
runmetadata: MetadataObject,
report_file: str,
notes: list):
"""
Create an amino acid query-specific report with sample name, allele match, and notes
:param runmetadata: MetadataObject with list of GenObjects for each query
:param report_file: String of absolute path to the report file
:param notes: List of sample-specific notes
"""
# Initialise the header string
header = 'Sample\tMatch\tNotes\n'
# Create an empty string to store the sample-specific results
data = str()
# Iterate over all the samples
for sample in runmetadata.samples:
# Extract the list of hits from the MetadataObject, and join with semicolons
matches = ';'.join(sample.alleles.blastlist)
        # Join the list of notes with semicolons
note = ';'.join(notes)
# Populate the data string with the matches and notes
data += f'{sample.name}\t{matches}\t{note}\n'
# If the report doesn't already exist write the header and data string
if not os.path.isfile(report_file):
with open(report_file, 'w', encoding='utf-8') as report:
report.write(header)
report.write(data)
# Otherwise write only the data string
else:
with open(report_file, 'a+', encoding='utf-8') as report:
report.write(data)
def load_alleles(
allele_path: str,
allele_order: dict):
"""
Use SeqIO to read in allele files
:param str allele_path: Name and path of folder containing allele files
:param dict allele_order: Dictionary of stx gene name: allele order to use
:return: str stx_gene: Name of stx gene (stx1 or stx2) being concatenated
:return: dict allele_dict: Dictionary of stx gene name: allele name: allele sequence
"""
# Initialise variable to store the stx gene name being analysed and the sequence of the alleles
stx_gene = None
allele_dict = {}
# Find all the allele files in the folder
allele_files = glob(os.path.join(allele_path, '*.fasta'))
for allele_file in allele_files:
# Set the name of the subunit by splitting off the path information and the file extension from the file
allele_name = os.path.splitext(os.path.basename(allele_file))[0]
# Determine which stx gene is being processed
for gene_name, alleles in allele_order.items():
# The name of the current subunit is present in the list of subunits linked to the stx gene
if allele_name in alleles:
stx_gene = gene_name
# Initialise the subunit name in the dictionary as required
if allele_name not in allele_dict:
allele_dict[allele_name] = {}
# Use SeqIO to read in the allele file
for record in SeqIO.parse(handle=allele_file, format='fasta'):
# Add the name of the allele and its sequence to the dictionary
allele_dict[allele_name][record.id] = str(record.seq)
return stx_gene, allele_dict
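# Illustrative sketch (not part of the original module; the folder path is hypothetical):
# assuming the allele folder contains ECs1205.fasta and ECs1206.fasta, load_alleles groups the
# subunit sequences under their subunit names and reports the parent stx gene, driven by an
# allele_order mapping such as the one used elsewhere in this package.
# >>> allele_order = {'stx1': ['ECs2974', 'ECs2973'], 'stx2': ['ECs1205', 'ECs1206']}
# >>> stx_gene, allele_dict = load_alleles(
# ...     allele_path='/path/to/nt_alleles',
# ...     allele_order=allele_order
# ... )
# >>> stx_gene
# 'stx2'
# >>> sorted(allele_dict)
# ['ECs1205', 'ECs1206']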
def concatenate_alleles(
profile_data: dict,
allele_dict: dict,
allele_order: dict,
stx_gene: str,
linker_length_dict: dict,
molecule: str):
"""
:param profile_data: Dictionary of all profiles in seq_type: {gene name: allele ID} format
:param allele_dict: Dictionary of stx gene name: allele name: allele sequence
:param dict allele_order: Dictionary of stx gene name: allele order to use
:param str stx_gene: Name of stx gene (stx1 or stx2) being concatenated
:param dict linker_length_dict: Dictionary of gene name: length of linker sequence to use
:param str molecule: String of the current molecule. Options are "nt" (nucleotide) and "aa" (amino acid)
:return: concatenated_sequences: List of SeqRecords for all concatenated sequences
"""
# Create a list to store SeqRecords of the concatenated sequences
concatenated_sequences = []
# Iterate over all the sequence type: profile pairs in profile_data
for seq_type, profile in profile_data.items():
# Initialise a string to store the concatenated sequences
concatenated_seq = str()
# Create a boolean to store if one (or both) of the allele subunits is missing
complete = True
# Create a variable to store the name of the concatenated allele
concatenated_name = str()
# Iterate over the subunits in order from the allele_order dictionary
for subunit in allele_order[stx_gene]:
# Extract the allele number from the profile dictionary using the subunit as the key
allele_number = profile[subunit]
# If the allele number is 0, the subunit is absent, and the concatenated sequence will not be created
if allele_number == '0':
complete = False
continue
# Set the full allele name by joining the subunit with the allele number
full_allele_name = f'{subunit}_{allele_number}'
# Extract the string of the allele sequence from the allele dictionary
allele_seq = allele_dict[subunit][full_allele_name]
# If the first subunit is already present, simply append the second subunit to the string
if concatenated_seq:
concatenated_seq += allele_seq
# Otherwise the linker sequence must be created
else:
# Extract the gene-specific linker length from the dictionary
linker_length = linker_length_dict[stx_gene]
# Nucleotide sequences will use N as the linker sequence
if molecule == 'nt':
linker = 'N' * linker_length
                # Amino acid sequences will use X as the linker sequence, with the linker length reduced by a factor of three
else:
linker = 'X' * int(linker_length / 3)
concatenated_seq += f'{allele_seq}{linker}'
# Update the name of the concatenated sequence
if concatenated_name:
concatenated_name += f'_{allele_number}'
else:
concatenated_name = allele_number
# Do not add incomplete sequences to the list
if not complete:
continue
# Create a SeqRecord of the allele using the novel allele name and sequence
concatenated_sequences.append(
SeqRecord(
seq=Seq(concatenated_seq),
id=concatenated_name,
name='',
description=''
)
)
return concatenated_sequences
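# Illustrative sketch (not part of the original module; the allele sequences and the linker
# length of 9 are assumptions chosen for brevity): for a nucleotide profile in which sequence
# type 22 uses ECs1205 allele 3 and ECs1206 allele 2, the two subunit sequences are joined with
# a run of Ns taken from linker_length_dict (amino acid concatenations use X and a third of that
# length), and the record is named from the two allele numbers.
# >>> records = concatenate_alleles(
# ...     profile_data={'22': {'ECs1205': '3', 'ECs1206': '2'}},
# ...     allele_dict={'ECs1205': {'ECs1205_3': 'ATGAAA'},
# ...                  'ECs1206': {'ECs1206_2': 'ATGTTT'}},
# ...     allele_order={'stx2': ['ECs1205', 'ECs1206']},
# ...     stx_gene='stx2',
# ...     linker_length_dict={'stx2': 9},
# ...     molecule='nt'
# ... )
# >>> str(records[0].seq), records[0].id
# ('ATGAAANNNNNNNNNATGTTT', '3_2')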
def write_concatenated_sequences(
concatenated_sequences: list,
concatenate_path: str,
file_name: str,
molecule: str):
"""
Write the concatenated sequences to file
:param list concatenated_sequences: List of all SeqRecord objects for the concatenated sequences
:param str concatenate_path: Name and absolute path of the folder into which the FASTA files of the concatenated
sequences are to be written
:param str file_name: File name to use. 'ECs2974_ECs2973' (stx1) and 'ECs1205_ECs1206' (stx2)
:param str molecule: String of the current molecule. Options are "nt" (nucleotide) and "aa" (amino acid)
"""
# Set the name of the output path by adding the molecule to the supplied path
output_path = os.path.join(concatenate_path, molecule)
make_path(inpath=output_path)
# Clear out any previous iterations of this script
previous_fastas = glob(os.path.join(output_path, '*.fasta'))
for fasta in previous_fastas:
os.remove(fasta)
# Set the name of the file to use
concatenated_file = os.path.join(output_path, f'{file_name}.fasta')
# Iterate over all the concatenated sequences
for concatenated_seq in concatenated_sequences:
with open(concatenated_file, 'a+', encoding='utf-8') as output_file:
SeqIO.write(
sequences=concatenated_seq,
handle=output_file,
format='fasta'
)
# ==== /AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/allele_tools/methods.py ====
# Standard imports
from argparse import (
ArgumentParser,
RawTextHelpFormatter
)
from glob import glob
import logging
import multiprocessing
import sys
import os
# Third party inputs
from olctools.accessoryFunctions.accessoryFunctions import (
make_path,
MetadataObject,
)
# Local imports
from allele_tools.allele_profiler import (
allele_prep,
parseable_blast_outputs,
read_profile
)
from allele_tools.allele_translate_reduce import Translate
from allele_tools.profile_reduce import ProfileReduce
from allele_tools.version import __version__
from allele_tools.methods import (
analyse_aa_alleles,
blast_alleles,
common_allele_find_errors,
concatenate_alleles,
create_aa_allele_comprehension,
create_frozen_allele_comprehension,
create_gene_names,
create_nt_allele_comprehension,
create_stec_report,
error_print,
load_alleles,
match_profile,
parse_aa_blast,
parse_colocated_results,
pathfinder,
profile_allele_check,
query_prep,
report_aa_alleles,
setup_arguments,
split_alleles,
write_concatenated_sequences
)
__author__ = 'adamkoziol'
class STEC:
"""
Perform the STEC-specific allele finding
"""
def main(self):
"""
Run the appropriate methods in the correct order
"""
# Create metadata objects for all files in the query folder
self.runmetadata = query_prep(
query_path=self.query_path,
runmetadata=self.runmetadata
)
for sample in self.runmetadata.samples:
logging.debug('Processing sample %s', sample.name)
records, gene_names, self.data = \
allele_prep(
allele_path=self.nt_allele_path,
gene_names=self.gene_names,
combined_targets=self.combined_targets,
amino_acid=self.amino_acid
)
gene_names = sorted(gene_names)
logging.info('Loading profile')
            # Extract the sequence type and allele dictionaries from the nucleotide and amino acid profile files
            nt_profile_data = read_profile(profile_file=self.nt_profile_file)
            aa_profile_data = read_profile(profile_file=self.aa_profile_file)
            # Run BLAST analyses of the query sequences against the allele database
blast_alleles(
runmetadata=sample,
amino_acid=self.amino_acid,
combined_targets=self.combined_targets,
cpus=self.cpus,
outfmt=self.outfmt
)
# Add the header to the BLAST outputs, and filter results based on cutoff
parseable_blast_outputs(
runmetadata=sample,
fieldnames=self.fieldnames,
extended_fieldnames=self.extended_fieldnames,
records=records,
cutoff=90
)
# Parse the BLAST results
sample, notes = parse_colocated_results(
runmetadata=sample,
fieldnames=self.fieldnames,
extended_fieldnames=self.extended_fieldnames,
amino_acid=self.amino_acid,
gene_names=gene_names,
nt_allele_path=self.nt_allele_path,
aa_allele_path=self.aa_allele_path,
report_path=self.report_path
)
# Create nucleotide allele comprehensions from the BLAST outputs
nt_allele_comprehension = create_nt_allele_comprehension(
runmetadata=sample,
gene_names=gene_names
)
# Create an amino acid allele comprehensions from the translated BLAST outputs
aa_allele_comprehension = create_aa_allele_comprehension(
runmetadata=sample,
gene_names=gene_names,
)
# Freeze the nucleotide allele comprehension
nt_frozen_allele_comprehension = create_frozen_allele_comprehension(
allele_comprehension=nt_allele_comprehension
)
# Freeze the amino acid allele comprehension
aa_frozen_allele_comprehension = create_frozen_allele_comprehension(
allele_comprehension=aa_allele_comprehension
)
# Find nucleotide profile matches
nt_profile_matches, nt_frozen_profiles = match_profile(
profile_data=nt_profile_data,
frozen_allele_comprehension=nt_frozen_allele_comprehension,
report_path=self.report_path,
profile_file=self.nt_profile_file,
genes=gene_names,
allele_comprehension=nt_allele_comprehension,
molecule='nt'
)
# Find amino acid profile matches
aa_profile_matches, aa_frozen_profiles = match_profile(
profile_data=aa_profile_data,
frozen_allele_comprehension=aa_frozen_allele_comprehension,
report_path=self.report_path,
profile_file=self.aa_profile_file,
genes=gene_names,
allele_comprehension=aa_allele_comprehension,
molecule='aa'
)
# Create the STEC-specific report
create_stec_report(
runmetadata=sample,
nt_profile_matches=nt_profile_matches,
nt_alleles=nt_allele_comprehension,
aa_profile_matches=aa_profile_matches,
aa_alleles=aa_allele_comprehension,
report_file=self.report_file,
gene_names=gene_names,
aa_profile_path=self.aa_profile_path,
notes=notes
)
def __init__(self, allele_path, aa_allele_path, profile_file, aa_profile_file, query_path, report_path,
amino_acid=False):
self.nt_allele_path = pathfinder(path=allele_path)
self.aa_allele_path = pathfinder(path=aa_allele_path)
self.nt_profile_file = pathfinder(path=profile_file)
self.aa_profile_file = pathfinder(path=aa_profile_file)
self.profile_path = os.path.dirname(self.nt_profile_file)
self.aa_profile_path = os.path.dirname(self.aa_profile_file)
self.query_path = pathfinder(path=query_path)
self.report_path = pathfinder(path=report_path)
self.aa_report_path = self.report_path
make_path(inpath=self.report_path)
novel_alleles = glob(os.path.join(self.report_path, '*.fasta'))
for novel_allele in novel_alleles:
os.remove(novel_allele)
self.amino_acid = amino_acid
if not self.amino_acid:
self.combined_targets = os.path.join(self.nt_allele_path, 'combinedtargets.fasta')
else:
self.combined_targets = os.path.join(self.aa_allele_path, 'combinedtargets.fasta')
self.gene_names = []
self.runmetadata = MetadataObject()
self.runmetadata.samples = []
self.cpus = multiprocessing.cpu_count() - 1
self.profile_report = os.path.join(self.report_path, 'nt_profiles.tsv')
self.aa_profile_report = os.path.join(self.aa_report_path, 'aa_profiles.tsv')
try:
os.remove(self.profile_report)
except FileNotFoundError:
pass
self.report_file = os.path.join(self.report_path, 'stec_report.tsv')
reports = glob(os.path.join(self.report_path, '*.tsv'))
for report in reports:
os.remove(report)
# Fields used for custom outfmt 6 BLAST output:
self.fieldnames = [
'query_id',
'subject_id',
'identical',
'mismatches',
'gaps',
'evalue',
'bit_score',
'query_length',
'subject_length',
'alignment_length',
'query_start',
'query_end',
'subject_start',
'subject_end',
'query_sequence',
'subject_sequence'
]
self.extended_fieldnames = self.fieldnames.copy()
self.extended_fieldnames.insert(14, 'percent_match')
self.outfmt = '6 qseqid sseqid nident mismatch gaps evalue bitscore qlen slen length ' \
'qstart qend sstart send qseq sseq'
# A string of the header to use for formatting the profile file, and the report headers
self.data = str()
self.aa_allele_dict = {}
self.aa_nt_allele_link_dict = {}
class AASTEC:
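"""
Perform STEC-specific allele discovery on amino acid query sequences
"""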
def main(self):
"""
Run the necessary methods in the correct order
"""
# Create metadata objects for all files in the query folder
self.runmetadata = query_prep(
query_path=self.query_path,
runmetadata=self.runmetadata,
clear_report=True
)
for sample in self.runmetadata.samples:
logging.debug('Processing sample %s', sample.name)
notes = []
records, gene_names, self.data = \
allele_prep(
allele_path=self.aa_allele_path,
gene_names=self.gene_names,
combined_targets=self.combined_targets,
amino_acid=self.amino_acid
)
gene_names = sorted(gene_names)
logging.info('Loading profile')
if not os.path.isfile(sample.alleles.blast_report):
# BLAST the query against the allele database if a report does not already exist
blast_alleles(
runmetadata=sample,
amino_acid=self.amino_acid,
combined_targets=self.combined_targets,
cpus=self.cpus,
outfmt=self.outfmt
)
# Add headers to the BLAST outputs, and filter based on cutoff value
parseable_blast_outputs(
runmetadata=sample,
fieldnames=self.fieldnames,
extended_fieldnames=self.extended_fieldnames,
records=records,
cutoff=self.cutoff
)
# Parse the amino acid BLAST results
sample, filtered, notes = parse_aa_blast(
runmetadata=sample,
extended_fieldnames=self.extended_fieldnames,
fieldnames=self.fieldnames,
gene_names=gene_names,
notes=notes,
aa_allele_path=self.aa_allele_path,
report_path=self.report_path,
cutoff=self.cutoff
)
# Perform content/length checks of the supplied alleles
sample, notes = analyse_aa_alleles(
runmetadata=sample,
gene_names=gene_names,
notes=notes)
# Create an amino acid query report
report_aa_alleles(
runmetadata=sample,
report_file=self.report_file,
notes=notes
)
def __init__(self, aa_allele_path, query_path, report_path, cutoff, amino_acid=True):
self.aa_allele_path = pathfinder(path=aa_allele_path)
self.query_path = pathfinder(path=query_path)
self.report_path = pathfinder(path=report_path)
self.aa_report_path = self.report_path
make_path(inpath=self.report_path)
novel_alleles = glob(os.path.join(self.report_path, '*.fasta'))
for novel_allele in novel_alleles:
os.remove(novel_allele)
self.cutoff = cutoff
self.amino_acid = amino_acid
self.combined_targets = os.path.join(self.aa_allele_path, 'combinedtargets.fasta')
self.gene_names = []
self.runmetadata = MetadataObject()
self.runmetadata.samples = []
self.cpus = multiprocessing.cpu_count() - 1
self.report_file = os.path.join(self.report_path, 'allele_report.tsv')
try:
os.remove(self.report_file)
except FileNotFoundError:
pass
# Fields used for custom outfmt 6 BLAST output:
self.fieldnames = [
'query_id',
'subject_id',
'identical',
'mismatches',
'gaps',
'evalue',
'bit_score',
'query_length',
'subject_length',
'alignment_length',
'query_start',
'query_end',
'subject_start',
'subject_end',
'query_sequence',
'subject_sequence'
]
self.extended_fieldnames = self.fieldnames.copy()
self.extended_fieldnames.insert(14, 'percent_match')
self.outfmt = '6 qseqid sseqid nident mismatch gaps evalue bitscore qlen slen length ' \
'qstart qend sstart send qseq sseq'
# A string of the header to use for formatting the profile file, and the report headers
self.data = str()
class AlleleConcatenate:
"""
Concatenate stx subunits. Read in profile files. Load alleles. Concatenate alleles with appropriate linker
"""
def main(self):
"""
Run the necessary methods for AlleleConcatenate
"""
self.gene, self.nt_alleles = load_alleles(
allele_path=self.nt_allele_path,
allele_order=self.allele_order
)
self.gene, self.aa_alleles = load_alleles(
allele_path=self.aa_allele_path,
allele_order=self.allele_order
)
logging.info('Concatenating allele sequences')
self.concatenated_nt_seq = concatenate_alleles(
profile_data=self.nt_profile_data,
allele_dict=self.nt_alleles,
allele_order=self.allele_order,
stx_gene=self.gene,
linker_length_dict=self.linker_length_dict,
molecule='nt'
)
self.concatenated_aa_seq = concatenate_alleles(
profile_data=self.aa_profile_data,
allele_dict=self.aa_alleles,
allele_order=self.allele_order,
stx_gene=self.gene,
linker_length_dict=self.linker_length_dict,
molecule='aa'
)
logging.info('Writing concatenated allele sequences to file')
write_concatenated_sequences(
concatenated_sequences=self.concatenated_nt_seq,
concatenate_path=self.concatenate_path,
file_name=self.gene_allele[self.gene],
molecule='nt'
)
write_concatenated_sequences(
concatenated_sequences=self.concatenated_aa_seq,
concatenate_path=self.concatenate_path,
file_name=self.gene_allele[self.gene],
molecule='aa'
)
def __init__(
self,
nt_allele_path,
aa_allele_path,
nt_profile_file,
aa_profile_file,
concatenate_path):
self.nt_allele_path = pathfinder(path=nt_allele_path)
self.aa_allele_path = pathfinder(path=aa_allele_path)
self.nt_profile_file = pathfinder(path=nt_profile_file)
self.aa_profile_file = pathfinder(path=aa_profile_file)
self.nt_profile_path = os.path.dirname(self.nt_profile_file)
self.aa_profile_path = os.path.dirname(self.aa_profile_file)
self.concatenate_path = pathfinder(path=concatenate_path)
self.linker_length_dict = {
'stx1': 9,
'stx2': 12,
}
# Set the appropriate order for the genes in the report (stx1 genes are not in numerical order)
self.allele_order = {
'stx1': ['ECs2974', 'ECs2973'],
'stx2': ['ECs1205', 'ECs1206']
}
self.gene_allele = {
'stx1': 'ECs2974_ECs2973',
'stx2': 'ECs1205_ECs1206'
}
self.nt_profile_data = read_profile(profile_file=self.nt_profile_file)
self.aa_profile_data = read_profile(profile_file=self.aa_profile_file)
self.gene = str()
self.nt_alleles = {}
self.aa_alleles = {}
self.concatenated_nt_seq = []
self.concatenated_aa_seq = []
def profile_reduce(args):
"""
Reduce the Enterobase profile to only the genes of interest
:param args: type ArgumentParser arguments
"""
# Create the gene names file if it doesn't exist or is empty
genes_path = os.path.dirname(args.gene_names)
genes_file = os.path.basename(args.gene_names)
logging.info(genes_path)
logging.info(genes_file)
if not os.path.isfile(args.gene_names):
# Ensure that the path exists
if not os.path.isdir(genes_path):
logging.error(
'Could not locate the supplied path, %s, for the gene file. Please ensure that it'
' exists, and that it either contains the gene file, or files with '
'.fasta extensions',
genes_path
)
raise SystemExit
logging.warning(
'Could not locate the supplied gene file: %s. Will now attempt to create it in '
'directory %s',
args.gene_names, genes_path
)
# Attempt to create the file
create_gene_names(
path=genes_path,
name=genes_file
)
else:
# Ensure that the file isn't empty
if os.stat(args.gene_names).st_size == 0:
logging.warning(
'The supplied gene file, %s, is empty. Will now attempt to populate it in '
'directory %s',
args.gene_names, genes_path
)
# Attempt to repopulate the file
create_gene_names(
path=genes_path,
name=genes_file
)
logging.info(
'Reducing profile file %s to include only the genes found in %s',
args.profile_file, args.gene_names
)
# Create a ProfileReduce object
profile_reduction = ProfileReduce(
profile=args.profile_file,
names=args.gene_names,
output=args.output_folder
)
profile_reduction.main()
logging.info('Profile reduction complete!')
def translate_reduce(args):
"""
Translate nucleotide alleles to amino acid, and remove duplicates
:param args: type ArgumentParser arguments
"""
log_str = f'Translating and reducing alleles in: {args.allele_path}'
if args.profile_file:
log_str += f'. Parsing {args.profile_file} nucleotide profile to create corresponding,'\
' reduced amino acid profile'
logging.info(log_str)
length_dict = {
'ECs2973': 90,
'ECs2974': 316,
'ECs1205': 316,
'ECs1206': 88
}
allele_translate_reduce = Translate(
path=args.allele_path,
profile=args.profile_file,
report_path=args.report_path,
translated_path=args.translated_path,
length_dict=length_dict,
)
allele_translate_reduce.main()
logging.info('Allele translation and reduction complete!')
def allele_find(args):
"""
Perform allele discovery analyses
:param args: type ArgumentParser arguments
"""
log_str = f'Performing STEC allele discovery on sequences in {args.query_path} using ' \
f'nucleotide alleles in {args.nt_alleles}, nucleotide profile in {args.nt_profile}, amino' \
f' acid alleles in {args.aa_alleles}, and amino acid profile in {args.aa_profile}'
logging.info(log_str)
errors = []
# Nucleotide allele checks
if not os.path.isdir(args.nt_alleles):
errors.append(f'Could not find supplied nucleotide allele folder: {args.nt_alleles}')
else:
if not glob(os.path.join(args.nt_alleles, '*.fasta')):
errors.append(
f'Could not locate sequence files in supplied nucleotide allele folder: {args.nt_alleles}'
)
# Find errors for amino acid and query checks
errors = common_allele_find_errors(
args=args,
errors=errors,
amino_acid=False
)
if not os.path.isfile(args.nt_profile):
errors.append(f'Could not locate supplied nucleotide profile file: {args.nt_profile}')
if errors:
error_print(errors=errors)
stec = STEC(
allele_path=args.nt_alleles,
aa_allele_path=args.aa_alleles,
profile_file=args.nt_profile,
aa_profile_file=args.aa_profile,
query_path=args.query_path,
report_path=args.report_path
)
stec.main()
def aa_allele_find(args):
"""
Perform allele discovery analyses on amino acid query files
:param args: type ArgumentParser arguments
"""
log_str = f'Performing STEC allele discovery on amino acid sequences in {args.query_path} using amino' \
f' acid alleles in {args.aa_alleles}'
logging.info(log_str)
errors = []
# Find errors for amino acid and query checks
errors = common_allele_find_errors(
args=args,
errors=errors,
amino_acid=True
)
if errors:
error_print(errors=errors)
aa_stec = AASTEC(
aa_allele_path=args.aa_alleles,
query_path=args.query_path,
report_path=args.report_path,
cutoff=args.cutoff
)
aa_stec.main()
def allele_split(args):
"""
Split files containing multiple alleles into individual files
:param args: type ArgumentParser arguments
"""
logging.info('Splitting allele files in %s into individual files', args.query_path)
errors = []
allele_files = glob(os.path.join(args.query_path, '*.fasta'))
# Query checks
if not os.path.isdir(args.query_path):
errors.append(f'Could not find supplied nucleotide allele folder: {args.query_path}')
else:
if not allele_files:
errors.append(
f'Could not locate sequence files in supplied allele folder: {args.query_path}'
)
if errors:
error_print(errors=errors)
split_alleles(
allele_files=allele_files,
output_path=args.output_path
)
def allele_concatenate(args):
"""
Concatenate subunit files with linkers. Provide linkages between nucleotide and amino acid files
:param args: type ArgumentParser arguments
"""
logging.info('Concatenating allele subunits')
errors = []
# Determine if the profile file, the allele folder, and the alleles all exist
errors = profile_allele_check(
args=args,
errors=errors
)
# If there were any errors with the supplied arguments, print them, and exit
if errors:
error_print(errors=errors)
concatenate = AlleleConcatenate(
nt_allele_path=args.nt_alleles,
aa_allele_path=args.aa_alleles,
nt_profile_file=args.nt_profile,
aa_profile_file=args.aa_profile,
concatenate_path=args.concatenate_path
)
concatenate.main()
def cli():
"""
Collect the arguments, create an object, and run the script
"""
# Parser for arguments
parser = ArgumentParser(
description='Determines STEC subunit profiles'
)
subparsers = parser.add_subparsers(title='Available analyses')
# Create a parental parser from which subparsers can inherit arguments
parent_parser = ArgumentParser(add_help=False)
# Add arguments common to all subparsers to the parent parser
parent_parser.add_argument(
'-version', '--version',
action='version',
version=f'%(prog)s commit {__version__}'
)
parent_parser.add_argument(
'-v', '--verbosity',
choices=['debug', 'info', 'warning', 'error', 'critical'],
metavar='verbosity',
default='info',
help='Set the logging level. Options are debug, info, warning, error, and critical. '
'Default is info.'
)
profile_reduce_subparser = subparsers.add_parser(
parents=[parent_parser],
name='profile_reduce',
description='Reduce full wgMLST profile from Enterobase using genes of interest',
formatter_class=RawTextHelpFormatter,
help='Reduce full wgMLST profile from Enterobase using genes of interest'
)
profile_reduce_subparser.add_argument(
'-p', '--profile_file',
metavar='profile_file',
default='profiles.list',
help='Specify name and path of profile file. If not provided, the default '
'"profiles.list" in the current working directory will be used'
)
profile_reduce_subparser.add_argument(
'-g', '--gene_names',
metavar='gene_names',
default='genes.txt',
help='Name and path of text file containing gene names to use to filter the profile file '
'(one per line). If not provided, the default "genes.txt" in the current working '
'directory will be used. If the file does not exist, the program will attempt to '
'create a file using the .fasta files in the current working directory'
)
profile_reduce_subparser.add_argument(
'-o', '--output_folder',
metavar='output_folder',
default='nt_profile',
help='Name and path of folder into which the reduced profile and notes are to be placed. '
'If not provided, the default "nt_profile" folder in the current working directory '
'will be used'
)
profile_reduce_subparser.set_defaults(func=profile_reduce)
# Create a subparser for allele translation and reduction
allele_translate_reduce_subparser = subparsers.add_parser(
parents=[parent_parser],
name='allele_translate_reduce',
description='Translate allele files in nucleotide format to amino acid. Remove duplicates. Keep notes',
formatter_class=RawTextHelpFormatter,
help='Translate allele files in nucleotide format to amino acid. '
'Remove duplicates. Keep notes'
)
allele_translate_reduce_subparser.add_argument(
'-a', '--allele_path',
metavar='allele_path',
default=os.path.join(os.getcwd(), 'nt_alleles'),
help='Specify name and path of folder containing allele files. If not provided, the '
'nt_alleles folder in the current working directory will be used by default'
)
allele_translate_reduce_subparser.add_argument(
'-p', '--profile_file',
metavar='profile_file',
help='Optionally specify name and path of profile file. '
'Parse the nucleic acid profile, and create the corresponding reduced amino acid profile'
)
allele_translate_reduce_subparser.add_argument(
'-r', '--report_path',
metavar='report_path',
default=os.path.join(os.getcwd(), 'aa_profile'),
help='Specify the name and path of the folder into which outputs are to be placed. If not '
'provided, the aa_profile folder in the current working directory will be used'
)
allele_translate_reduce_subparser.add_argument(
'-t', '--translated_path',
metavar='translated_path',
default=os.path.join(os.getcwd(), 'aa_alleles'),
help='Specify the name and path of the folder into which alleles are to be placed. If not '
'provided, the aa_alleles folder in the current working directory will be used'
)
allele_translate_reduce_subparser.set_defaults(func=translate_reduce)
# Create a subparser for allele discovery
allele_find_subparser = subparsers.add_parser(
parents=[parent_parser],
name='allele_find',
description='Analyse sequences to determine allele complement. Update profiles and '
'databases. Keep notes',
formatter_class=RawTextHelpFormatter,
help='Analyse sequences to determine allele complement. Update profiles and databases. '
'Keep notes'
)
allele_find_subparser.add_argument(
'--nt_profile',
metavar='nt_profile',
default=os.path.join(os.getcwd(), 'nt_profile', 'profile.txt'),
help='Specify name and path of nucleotide profile file. If not provided, profile.txt in '
'the nt_profile folder in the current working directory will be used by default'
)
allele_find_subparser.add_argument(
'--aa_profile',
metavar='aa_profile',
default=os.path.join(os.getcwd(), 'aa_profile', 'profile.txt'),
help='Specify name and path of amino acid profile file. If not provided, profile.txt in '
'the aa_profile folder in the current working directory will be used by default'
)
allele_find_subparser.add_argument(
'--nt_alleles',
metavar='nt_alleles',
default=os.path.join(os.getcwd(), 'nt_alleles'),
help='Specify name and path of folder containing nucleotide alleles. If not provided, the '
'nt_allele folder in the current working directory will be used by default'
)
allele_find_subparser.add_argument(
'--aa_alleles',
metavar='aa_alleles',
default=os.path.join(os.getcwd(), 'aa_alleles'),
help='Specify name and path of folder containing amino acid alleles. If not provided, the '
'aa_allele folder in the current working directory will be used by default'
)
allele_find_subparser.add_argument(
'-r', '--report_path',
metavar='report_path',
default=os.path.join(os.getcwd(), 'reports'),
help='Specify name and path of folder into which reports are to be placed. If not '
'provided, the reports folder in the current working directory will be used'
)
allele_find_subparser.add_argument(
'-q', '--query_path',
metavar='query_path',
default=os.path.join(os.getcwd(), 'query'),
help='Specify name and path of folder containing query files in FASTA format. '
'If not provided, the query folder in the current working directory will be used'
)
allele_find_subparser.set_defaults(func=allele_find)
# Create a subparser for allele discovery
aa_allele_find_subparser = subparsers.add_parser(
parents=[parent_parser],
name='aa_allele_find',
description='Analyse amino acid sequences to determine allele complement. Update profiles and '
'databases. Keep notes',
formatter_class=RawTextHelpFormatter,
help='Analyse amino acid sequences to determine allele complement. Update profiles and databases. '
'Keep notes'
)
aa_allele_find_subparser.add_argument(
'--aa_alleles',
metavar='aa_alleles',
default=os.path.join(os.getcwd(), 'aa_alleles'),
help='Specify name and path of folder containing amino acid alleles. If not provided, the '
'aa_allele folder in the current working directory will be used by default'
)
aa_allele_find_subparser.add_argument(
'-r', '--report_path',
metavar='report_path',
default=os.path.join(os.getcwd(), 'reports'),
help='Specify name and path of folder into which reports are to be placed. If not '
'provided, the reports folder in the current working directory will be used'
)
aa_allele_find_subparser.add_argument(
'-q', '--query_path',
metavar='query_path',
default=os.path.join(os.getcwd(), 'query'),
help='Specify name and path of folder containing query files in FASTA format. '
'If not provided, the query folder in the current working directory will be used'
)
aa_allele_find_subparser.add_argument(
'-c', '--cutoff',
metavar='cutoff',
default=90,
type=int,
choices=[percent for percent in range(90, 101)],
help='Specify the percent identity cutoff for matches. Allowed values are between 90 and 100. Default is 90'
)
aa_allele_find_subparser.set_defaults(func=aa_allele_find)
# Create a subparser for splitting multi-FASTA files of alleles
allele_split_subparser = subparsers.add_parser(
parents=[parent_parser],
name='allele_split',
description='Split combined allele files into individual files',
formatter_class=RawTextHelpFormatter,
help='Split combined allele files into individual files'
)
allele_split_subparser.add_argument(
'-q', '--query_path',
metavar='query_path',
default=os.path.join(os.getcwd(), 'query'),
help='Specify name and path of folder containing query files in FASTA format. '
'If not provided, the query folder in the current working directory will be used'
)
allele_split_subparser.add_argument(
'-o', '--output_path',
metavar='output_path',
default=os.path.join(os.getcwd(), 'split_alleles'),
help='Specify name and path of folder into which the split allele files are to be written. '
'If not provided, the split_alleles folder in the current working directory will be used'
)
allele_split_subparser.set_defaults(func=allele_split)
# Create a subparser for concatenating stx subunits
allele_concatenate_subparser = subparsers.add_parser(
parents=[parent_parser],
name='allele_concatenate',
description='Concatenate stx toxin subunit alleles with linkers',
formatter_class=RawTextHelpFormatter,
help='Concatenate stx toxin subunit alleles with linkers'
)
allele_concatenate_subparser.add_argument(
'--nt_profile',
metavar='nt_profile',
default=os.path.join(os.getcwd(), 'nt_profile', 'profile.txt'),
help='Specify name and path of nucleotide profile file. If not provided, profile.txt in '
'the nt_profile folder in the current working directory will be used by default'
)
allele_concatenate_subparser.add_argument(
'--aa_profile',
metavar='aa_profile',
default=os.path.join(os.getcwd(), 'aa_profile', 'profile.txt'),
help='Specify name and path of amino acid profile file. If not provided, profile.txt in '
'the aa_profile folder in the current working directory will be used by default'
)
allele_concatenate_subparser.add_argument(
'--nt_alleles',
metavar='nt_alleles',
default=os.path.join(os.getcwd(), 'nt_alleles'),
help='Specify name and path of folder containing nucleotide alleles. If not provided, the '
'nt_allele folder in the current working directory will be used by default'
)
allele_concatenate_subparser.add_argument(
'--aa_alleles',
metavar='aa_alleles',
default=os.path.join(os.getcwd(), 'aa_alleles'),
help='Specify name and path of folder containing amino acid alleles. If not provided, the '
'aa_allele folder in the current working directory will be used by default'
)
allele_concatenate_subparser.add_argument(
'-c', '--concatenate_path',
metavar='concatenate_path',
default=os.path.join(os.getcwd(), 'concatenated_alleles'),
help='Specify name and path of folder into which concatenated subunit files are to be placed. If not '
'provided, the concatenated_alleles folder in the current working directory will be used'
)
allele_concatenate_subparser.set_defaults(func=allele_concatenate)
# Get the arguments into an object
arguments = setup_arguments(parser=parser)
# Prevent the arguments being printed to the console (they are returned in order for the tests to work)
sys.stderr = open(os.devnull, 'w', encoding='utf-8')
return arguments
if __name__ == '__main__':
cli()
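# Example invocations (a sketch; this assumes the module is run directly as a script,
# while an installed console-script entry point, if any, may be named differently):
#   python stec.py profile_reduce -p profiles.list -g genes.txt -o nt_profile
#   python stec.py allele_translate_reduce -a nt_alleles -p nt_profile/profile.txt
#   python stec.py allele_find --nt_alleles nt_alleles --aa_alleles aa_alleles --nt_profile nt_profile/profile.txt --aa_profile aa_profile/profile.txt -q query -r reports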
|
AlleleFinder
|
/AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/allele_tools/stec.py
|
stec.py
|
# Standard imports
from argparse import ArgumentParser
import multiprocessing
from glob import glob
import logging
import json
import os
# Third-party imports
from olctools.accessoryFunctions.accessoryFunctions import \
make_path, \
MetadataObject, \
SetupLogging
from Bio import SeqIO
# Local imports
from allele_tools.allele_profiler import \
allele_prep, \
append_profiles, \
clear_alleles, \
create_profile, \
match_profile, \
parse_results, \
parseable_blast_outputs, \
profile_alleles, \
read_profile, \
sequence_typer
from allele_tools.methods import \
update_allele_database, \
translate, \
query_prep, \
blast_alleles, \
pathfinder
__author__ = 'adamkoziol'
class Updater:
"""
Determine allele complement and profile of sequences. Update profiles and databases as
necessary
"""
# pylint: disable=too-many-instance-attributes
def main(self):
"""
Run the appropriate methods in the correct order
"""
# Create metadata objects for all files in the query folder
self.runmetadata = query_prep(
query_path=self.query_path,
runmetadata=self.runmetadata
)
for sample in self.runmetadata.samples:
logging.debug('Processing sample %s', sample.name)
# Perform necessary prep on the alleles
if not self.amino_acid:
records, gene_names, self.data = \
allele_prep(
allele_path=self.allele_path,
gene_names=self.gene_names,
combined_targets=self.combined_targets,
amino_acid=self.amino_acid
)
else:
records, gene_names, self.data = \
allele_prep(
allele_path=self.aa_allele_path,
gene_names=self.gene_names,
combined_targets=self.combined_targets,
amino_acid=self.amino_acid
)
logging.info('Loading profile')
if not self.amino_acid:
profile_data = read_profile(profile_file=self.profile_file)
else:
profile_data = read_profile(profile_file=self.aa_profile_file)
# BLAST the query against the allele database
blast_alleles(
runmetadata=sample,
amino_acid=self.amino_acid,
combined_targets=self.combined_targets,
cpus=self.cpus,
outfmt=self.outfmt
)
# Add a user-friendly header to the BLAST outputs
parseable_blast_outputs(
runmetadata=sample,
fieldnames=self.fieldnames,
extended_fieldnames=self.extended_fieldnames,
records=records
)
# Parse the BLAST outputs
sample = parse_results(
runmetadata=sample,
fieldnames=self.fieldnames,
extended_fieldnames=self.extended_fieldnames,
amino_acid=self.amino_acid,
genome_query=True
)
# Perform sequence typing of the parsed results
if not self.amino_acid:
profile_dict, profile_set = profile_alleles(
runmetadata=sample,
profile_dict={},
profile_set=[],
records=self.gene_names,
novel_alleles=True,
genome_query=True,
allele_path=self.allele_path,
report_path=self.report_path
)
else:
profile_dict, profile_set = profile_alleles(
runmetadata=sample,
profile_dict={},
profile_set=[],
records=self.gene_names,
amino_acid=True,
novel_alleles=True,
genome_query=True,
allele_path=self.aa_allele_path,
report_path=self.report_path
)
# Match the query profile against the profile database
profile_matches = match_profile(
profile_data=profile_data,
profile_dict=profile_dict,
profile_matches={}
)
# Create new profiles as required
profile_matches, profile_data, new_profiles = \
create_profile(
profile_data=profile_data,
profile_set=profile_set,
new_profiles=[],
profile_dict=profile_dict,
profile_matches=profile_matches
)
# Perform final sequence typing, and create final report
if not self.amino_acid:
sample = sequence_typer(
profile_report=self.profile_report,
data=self.data,
runmetadata=sample,
profile_matches=profile_matches,
profile_data=profile_data,
update=True
)
# Write the novel profiles to file
append_profiles(
new_profiles=new_profiles,
profile_file=self.profile_file,
data=self.data,
novel_profiles=True,
profile_path=self.profile_path,
gene_names=self.gene_names
)
else:
sample = sequence_typer(
profile_report=self.aa_profile_report,
data=self.data,
runmetadata=sample,
profile_matches=profile_matches,
profile_data=profile_data,
update=True
)
append_profiles(
new_profiles=new_profiles,
profile_file=self.aa_profile_file,
data=self.data,
novel_profiles=True,
profile_path=self.aa_profile_path,
gene_names=self.gene_names
)
if not self.amino_acid:
# AA
sample = translate(runmetadata=sample)
self.aa_allele_prep()
aa_profile_dict, aa_profile_set = self.aa_allele_match(
runmetadata=sample,
profile_dict={},
profile_set=[],
gene_names=gene_names,
amino_acid=True
)
aa_profile_data = read_profile(profile_file=self.aa_profile_file)
aa_profile_matches = match_profile(
profile_data=aa_profile_data,
profile_dict=aa_profile_dict,
profile_matches={}
)
aa_profile_matches, aa_profile_data, aa_new_profiles = \
create_profile(
profile_data=aa_profile_data,
profile_set=aa_profile_set,
new_profiles=[],
profile_dict=aa_profile_dict,
profile_matches=aa_profile_matches
)
sample = sequence_typer(
profile_report=self.aa_profile_report,
data=self.data,
runmetadata=sample,
profile_matches=aa_profile_matches,
profile_data=aa_profile_data,
update=True,
amino_acid=True
)
make_path(self.aa_profile_path)
append_profiles(
new_profiles=aa_new_profiles,
profile_file=self.aa_profile_file,
data=self.data,
novel_profiles=True,
profile_path=self.aa_profile_path,
gene_names=self.gene_names
)
self.aa_notes(runmetadata=sample)
clear_alleles(
combined_targets_db=glob(os.path.join(self.allele_path, 'combinedtargets*')),
custom_targets=os.path.join(self.allele_path, 'custom.tfa')
)
def aa_allele_prep(self):
"""
Create (first time only) and read the amino acid allele database file
"""
# Create the amino acid allele database file path as required
make_path(self.aa_allele_path)
# Iterate through all the genes in the analysis
for gene in self.gene_names:
# Attempt to find the database file
try:
allele_file = glob(os.path.join(self.aa_allele_path, f'{gene}*.*fa*'))[0]
# Create the file if it doesn't exist
except IndexError:
allele_file = self.initialise_aa_alleles(gene=gene)
# Read in and store all the amino acid records in the allele database file
for record in SeqIO.parse(allele_file, 'fasta'):
self.aa_allele_dict[record.id] = str(record.seq)
def initialise_aa_alleles(self, gene):
"""
Create a gene-specific amino acid allele database file
:param gene: Name of current gene being analysed
:return: Name and absolute path to the created database file
"""
# Find the corresponding gene-specific nucleotide database file
nt_allele_file = glob(os.path.join(self.allele_path, f'{gene}*.*fa*'))[0]
# Set the name of the amino acid database file
aa_allele_file = os.path.join(self.aa_allele_path, f'{gene}_alleles.fasta')
# Initialise a dictionary to store str(amino acid sequence): allele name, to be used
# in finding duplicate translated alleles
allele_dict = {}
for record in SeqIO.parse(nt_allele_file, 'fasta'):
# Replace any dashes in the record.id with underscores
record.id = record.id.replace('-', '_')
record_id = record.id
# Translate the sequence to amino acid
record = record.translate()
record.id = record_id
record.description = str()
# Extract the gene name from the allele number
gene, allele_id = record.id.rsplit('_', 1)
# Initialise the gene key in the dictionary as required
if gene not in self.aa_nt_allele_link_dict:
self.aa_nt_allele_link_dict[gene] = dict()
# Check if the translated sequence is not present in the dictionary
if str(record.seq) not in allele_dict:
# Update the dictionary with the sequence: allele
allele_dict[str(record.seq)] = record.id
# Update the dictionary with gene: allele
if not self.aa_nt_allele_link_dict[gene]:
self.aa_nt_allele_link_dict[gene][record.id] = record.id
if allele_id == '1':
# Write the translated target sequence to file
with open(aa_allele_file, 'a+', encoding='utf-8') as aa_targets:
SeqIO.write(record, aa_targets, 'fasta')
else:
if not self.aa_nt_allele_link_dict[gene]:
self.aa_nt_allele_link_dict[gene][record.id] = allele_dict[str(record.seq)]
return aa_allele_file
def aa_allele_match(self, runmetadata, profile_dict, profile_set, gene_names, amino_acid):
"""
Match the alleles in the current sample to alleles in the database
:param runmetadata: List of metadata objects for the current sample
:param profile_dict: Dictionary to store gene:allele profile for each sample
:param profile_set: List of all unique profiles
:param gene_names: List of all gene names in the analysis
:param amino_acid: Variable indicating whether the current analyses are on DNA or
amino acid sequences
:return: profile_dict and profile_set updated with the results from the current sample
"""
for sample in runmetadata.samples:
# Initialise a dictionary to store the gene:allele combinations for each sample
allele_comprehension = {}
for allele, allele_seq in sorted(sample.alleles.nt_alleles_translated.items()):
present = False
# Strip off the allele number from the allele e.g. adk_1 yields adk, while EC042_RS26480_2 yields
# EC042_RS26480
try:
gene, _ = allele.rsplit('_', 1)
except ValueError:
gene = str()
for record_id, record_seq in sorted(self.aa_allele_dict.items()):
if allele_seq == record_seq:
# Update the dictionary with the new gene: allele number for the sample
allele_comprehension.update({gene: record_id.split('_')[-1]})
present = True
# If, after iterating through all the BLAST outputs, the gene is not present in the sample, update
# the gene: allele to reflect this absence
if not present:
novel_allele = update_allele_database(
gene=gene,
query_sequence=allele_seq,
allele_path=self.aa_allele_path,
report_path=self.report_path,
amino_acid=amino_acid
)
if novel_allele:
allele_comprehension.update({gene: novel_allele.split('_')[-1]})
else:
allele_comprehension.update({gene: '0'})
profile_dict, profile_set, allele_comprehension = self.profile_dict_set_populate(
gene_names=gene_names,
allele_comprehension=allele_comprehension,
profile_dict=profile_dict,
sample=sample,
profile_set=profile_set
)
return profile_dict, profile_set
@staticmethod
def profile_dict_set_populate(gene_names, allele_comprehension, profile_dict, sample, profile_set):
"""
Update the profile dictionary, profile set, and allele comprehension for each gene
:param gene_names: List of all gene names in the analysis
:param allele_comprehension: Dictionary of the gene:allele combinations for each sample
:param profile_dict: Dictionary to store gene:allele profile for each sample
:param sample: Metadata objects of current genome
:param profile_set: List of all unique profiles
"""
# Iterate through all the genes
for gene_name in gene_names:
if gene_name not in allele_comprehension:
allele_comprehension.update({gene_name: '0'})
# In order to hash the dictionary, use JSON, with sorted keys to freeze it
frozen_allele_comprehension = json.dumps(allele_comprehension, sort_keys=True)
# Update the dictionary of profiles with the hash of the
# frozen dictionary: list of samples with that hash
if hash(frozen_allele_comprehension) not in profile_dict:
profile_dict[hash(frozen_allele_comprehension)] = [sample.name]
else:
profile_dict[hash(frozen_allele_comprehension)].append(sample.name)
# Add the 'regular' dictionary to the list of all profiles as required
if allele_comprehension not in profile_set:
profile_set.append(allele_comprehension)
return profile_dict, profile_set, allele_comprehension
def aa_notes(self, runmetadata):
"""
Create (first time only), and update the profile and alleles notes files. These files
link amino acid profile(s) and allele(s), respectively, to the corresponding nucleotide
profile, and allele
:param runmetadata: List of metadata objects for the current sample
"""
logging.info('Creating/Updating notes')
# Read in the profile notes file
allele_profile_dict = self.read_aa_profile_notes()
for sample in runmetadata.samples:
# Check if the nucleotide sequence type has previously been encountered
try:
# Extract all the previously encountered amino acid sequence types corresponding
# to the nucleotide st
known_allele_profiles = allele_profile_dict[sample.alleles.nt_st]
# Determine whether the current amino acid st is already in the list of aa
# sequence types
if sample.alleles.aa_st not in known_allele_profiles:
# If the current st is novel, update the list of profiles, and use the list
# to update the notes file
allele_profile_dict[sample.alleles.nt_st].append(sample.alleles.aa_st)
self.update_aa_profile_notes(aa_profile_dict=allele_profile_dict)
# If the current nucleotide sequence type is novel, add it to the dictionary
except KeyError:
allele_profile_dict[sample.alleles.nt_st] = [sample.alleles.aa_st]
# Use the dictionary to update the notes file
self.update_aa_profile_notes(aa_profile_dict=allele_profile_dict)
# Process the allele file
for gene, allele in sample.alleles.nt_profile.items():
# Attempt to read in the previous allele notes file
allele_notes_dict = self.read_aa_allele_notes(gene=gene)
# Check if the nucleotide allele is present in the dictionary
try:
# Extract the list of all amino acid alleles corresponding
# to the nucleotide allele
try:
known_aa_alleles = allele_notes_dict[int(allele)]
# Initialise an empty list if the nucleotide allele cannot be converted to an integer
except ValueError:
known_aa_alleles = list()
# Set the aa_allele variable to save typing the long attribute
aa_allele = sample.alleles.aa_profile[gene]
# Check to see if the amino acid allele has already been encountered
if aa_allele not in known_aa_alleles:
# Make sure that the allele isn't missing ('0')
if aa_allele != '0':
# Add the allele to the list
allele_notes_dict[int(allele)].append(aa_allele)
# Update the notes file with the novel allele
self.update_aa_allele_notes(
gene=gene,
allele_notes_dict=allele_notes_dict
)
# If the nucleotide allele is novel, add it to the dictionary
except KeyError:
aa_allele = sample.alleles.aa_profile[gene]
if aa_allele != '0':
allele_notes_dict[int(allele)] = [aa_allele]
self.update_aa_allele_notes(
gene=gene,
allele_notes_dict=allele_notes_dict
)
def read_aa_profile_notes(self):
"""
Read in all the notes from the profile file (if it exists)
:return: aa_profile_dict: Dictionary of nucleotide seq type: amino acid sequence type(s)
"""
aa_profile_dict = dict()
try:
with open(self.aa_profile_notes, 'r', encoding='utf-8') as profile:
# Ignore the header
_ = profile.readline()
# Read in all the lines
for line in profile:
# Split the nucleotide sequence type from the amino acid sequence type(s)
nt_profile, aa_profiles = line.rstrip().split('\t')
# Create a list of all the amino acid sequence types
aa_profile_dict[nt_profile] = aa_profiles.split(';')
except FileNotFoundError:
pass
return aa_profile_dict
def update_aa_profile_notes(self, aa_profile_dict):
"""
Update the profile file with a novel entry
:param aa_profile_dict: Dictionary of nucleotide sequence type: amino acid sequence type(s)
"""
# Overwrite the previous notes file
with open(self.aa_profile_notes, 'w', encoding='utf-8') as profile:
# Create the header for the profile file
profile.write('nt_profile\taa_profile(s)\n')
# Iterate through all the sequence type entries in the dictionary
for nt_profile, aa_profiles in aa_profile_dict.items():
aa_profiles = ';'.join([str(profile) for profile in aa_profiles])
# Write each nucleotide profile: aa_profile link
profile.write(f'{nt_profile}\t{aa_profiles}\n')
def read_aa_allele_notes(self, gene):
"""
Read in the allele notes file. If it doesn't exist, create it
:param gene: Name of the current gene being analysed
:return: Dictionary of nucleotide allele: list of corresponding amino acid alleles
"""
allele_notes_dict = {}
# Set the name of the notes file
notes_file = os.path.join(self.aa_notes_path, f'{gene}_allele_notes.tsv')
# Attempt to read the notes file
try:
with open(notes_file, 'r', encoding='utf-8') as notes:
# Ignore the header
_ = notes.readline()
# Extract the nt allele, ';'-separated list of amino acid alleles from the line
for line in notes:
nt_allele, aa_alleles = line.rstrip().split('\t')
# Create a list of all the amino acid alleles
allele_notes_dict[int(nt_allele)] = aa_alleles.split(';')
# Create the file if it doesn't exist
except FileNotFoundError:
# Iterate through all the entries in the dictionary of gene name: nt allele: aa allele
for _, allele_dict in self.aa_nt_allele_link_dict.items():
for nt_allele, aa_allele in allele_dict.items():
# Split the entries on underscores
_, nt_allele_number = nt_allele.rsplit('_', 1)
_, aa_allele_number = aa_allele.rsplit('_', 1)
# Update the notes dictionary with int(nt allele): list(aa allele)
allele_notes_dict[int(nt_allele_number)] = [aa_allele_number]
return allele_notes_dict
def update_aa_allele_notes(self, gene, allele_notes_dict):
"""
Update the amino acid allele notes file with the novel alleles
:param gene: Current gene being analysed
:param allele_notes_dict: Dictionary of nucleotide allele: list of corresponding amino
acid alleles
"""
# Set the name and path of the notes file
notes_file = os.path.join(self.aa_notes_path, f'{gene}_allele_notes.tsv')
with open(notes_file, 'w', encoding='utf-8') as notes:
# Create the header for the notes file
notes.write('nt_allele\taa_alleles\n')
for nt_allele, aa_alleles in sorted(allele_notes_dict.items()):
# Write the nucleotide allele and list of corresponding amino acid alleles
aa_alleles = ';'.join(str(allele) for allele in aa_alleles)
notes.write(f'{str(nt_allele)}\t{aa_alleles}\n')
def __init__(self, path, amino_acid):
self.path = pathfinder(path=path)
self.allele_path = os.path.join(self.path, 'nt_alleles')
self.aa_allele_path = os.path.join(self.path, 'aa_alleles')
self.profile_path = os.path.join(self.path, 'nt_profile')
self.aa_profile_path = os.path.join(self.path, 'aa_profile')
make_path(self.profile_path)
self.profile_file = os.path.join(self.profile_path, 'profile.txt')
self.aa_profile_file = os.path.join(self.aa_profile_path, 'profile.txt')
self.query_path = os.path.join(self.path, 'query')
self.report_path = os.path.join(self.path, 'reports')
make_path(self.report_path)
novel_alleles = glob(os.path.join(self.report_path, '*.fasta'))
for novel_allele in novel_alleles:
os.remove(novel_allele)
self.aa_notes_path = os.path.join(self.path, 'aa_notes')
make_path(self.aa_notes_path)
self.aa_profile_notes = os.path.join(self.aa_notes_path, 'aa_profile_notes.tsv')
self.amino_acid = amino_acid
if not self.amino_acid:
self.combined_targets = os.path.join(self.allele_path, 'combinedtargets.fasta')
else:
self.combined_targets = os.path.join(self.aa_allele_path, 'combinedtargets.fasta')
self.gene_names = []
self.runmetadata = MetadataObject()
self.runmetadata.samples = []
self.cpus = multiprocessing.cpu_count() - 1
self.profile_report = os.path.join(self.report_path, 'nt_profiles.tsv')
self.aa_profile_report = os.path.join(self.report_path, 'aa_profiles.tsv')
try:
os.remove(self.profile_report)
except FileNotFoundError:
pass
# Fields used for custom outfmt 6 BLAST output:
self.fieldnames = ['query_id', 'subject_id', 'identical', 'mismatches', 'gaps',
'evalue', 'bit_score', 'query_length', 'subject_length', 'alignment_length',
'query_start', 'query_end', 'subject_start', 'subject_end',
'query_sequence', 'subject_sequence']
self.extended_fieldnames = self.fieldnames.copy()
self.extended_fieldnames.insert(14, 'percent_match')
self.outfmt = '6 qseqid sseqid nident mismatch gaps evalue bitscore qlen slen length ' \
'qstart qend sstart send qseq sseq'
# A string of the header to use for formatting the profile file, and the report headers
self.data = str()
self.aa_allele_dict = {}
self.aa_nt_allele_link_dict = {}
def cli():
"""
Collect the arguments, create an object, and run the script
"""
# Parser for arguments
parser = ArgumentParser(
description='Determines profiles of strains against previously calculated allele database '
'and profile. Creates and/or updates both the database of allele definitions '
'and the profile based on novel alleles and/or profiles discovered'
)
parser.add_argument(
'-p', '--path',
required=True,
help='Specify path. Note that due to code reuse, the query sequence files must be in the '
'"query" sub-folder, nucleotide alleles must be in the "nt_alleles" sub-folder, the nucleotide profile '
'must be named profile.txt and be located in the "nt_profile" sub-folder, amino acid alleles must be in '
'the "aa_allele" sub-folder, and the amino acid profile must be named profile.txt and be located in the'
'"aa_profile" sub-folder '
)
parser.add_argument(
'-aa', '--amino_acid',
action='store_true',
help='The query sequences are protein.'
)
# Get the arguments into an object
arguments = parser.parse_args()
SetupLogging(debug=True)
# Run the profiling pipeline
updater = Updater(
path=arguments.path,
amino_acid=arguments.amino_acid
)
updater.main()
logging.info('Allele Updating complete')
if __name__ == '__main__':
cli()
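# Example invocation (a sketch; assumes the folder layout described in the --path help
# text: query/, nt_alleles/, nt_profile/profile.txt, aa_alleles/ and aa_profile/profile.txt):
#   python allele_updater.py -p /path/to/analysis
#   python allele_updater.py -p /path/to/analysis -aa    # protein query sequences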
|
AlleleFinder
|
/AlleleFinder-0.1.4.tar.gz/AlleleFinder-0.1.4/allele_tools/allele_updater.py
|
allele_updater.py
|
# allelica-lib Documentation.
The source code of the library: [Source-Code](https://github.com/4DIngenieria/allelica-lib).
An example using the class: [Source-Code](https://github.com/4DIngenieria/Allelica).
```diff
For The Installation of This Library: "pip install Allelica-pkg-Nico.4D"
```
```python
#Example Without Dask
from allelica_lib import parse
parse.process("source.xlsx", "out.tsv")
```
```python
#Example Using The Library Dask
from allelica_lib import parse
parse.process_dask("source.xlsx", "out.tsv", 4)
#'4' Is The Number of Partitions That The Dask Library Will Use When Running The Process.
```
```diff
Other Requirements (Auto-Install With The Library):
```
- pip install dask
- pip install pandas
- pip install openpyxl
|
Allelica-pkg-Nico.4D
|
/Allelica-pkg-Nico.4D-1.0.3.tar.gz/Allelica-pkg-Nico.4D-1.0.3/README.md
|
README.md
|
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature
import rsa
import base64
def generate_key(pubkey_file, privkey_file):
"""
Generate an RSA key pair and write it to the given PEM files
:return: (public key object, private key object)
"""
# Generate the keys
pubkey, privkey = rsa.newkeys(1024)
# Save the keys
with open(pubkey_file,'w+') as f:
f.write(pubkey.save_pkcs1().decode())
with open(privkey_file,'w+') as f:
f.write(privkey.save_pkcs1().decode())
return pubkey, privkey
def signature(msg, privkey):
"""
Generate a signature
:param msg: content to sign
:param privkey: private key in PKCS#1 PEM format
:return: signature bytes
"""
privkey = rsa.PrivateKey.load_pkcs1(privkey)
return rsa.sign(msg.encode(), privkey, 'SHA-1')
def verify(msg, sign, pubkey, decode_base64=False):
"""
Verify a signature
The sign passed in a request is base64-encoded; use the decode_base64 option to decode it
:return: True if the signature is valid, otherwise False
"""
if decode_base64:
sign = base64.b64decode(sign.encode())
if isinstance(sign, str):
sign = bytes(sign, encoding='utf-8')
if not isinstance(msg, bytes):
msg = msg.encode('utf-8')
try:
rsa.verify(msg, sign, rsa.PublicKey.load_pkcs1(pubkey))
return True
except rsa.pkcs1.VerificationError:
return False
def gen_token(secret_key, salt=None, payload=None, expires=3600):
s = Serializer(
secret_key=secret_key,
salt=salt,
expires_in=expires
)
return s.dumps(payload)
class payloadIllegalError(Exception):
def __init__(self, err="illegal payload inside. Secrete key may have been disclosed!"):
Exception.__init__(self, err)
def token_verify(token, secret_key, salt=None):
# token decoding
s = Serializer(
secret_key=secret_key,
salt=salt
)
try:
data = s.loads(token)
# Raises SignatureExpired when the token has expired
except BadSignature as e:
encoded_payload = e.payload
if encoded_payload:
s.load_payload(encoded_payload)  # raises BadData if the token has been tampered with
raise BadSignature('BadSignature')  # the payload is incomplete
return data
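# A minimal usage sketch of the helpers above (file names and the secret are
# illustrative; generate_key writes the PEM files to the current directory):
if __name__ == '__main__':
    pub, priv = generate_key('demo_pub.pem', 'demo_priv.pem')
    # Sign with the private key PEM bytes, verify against the public key PEM bytes
    sig = signature('hello', priv.save_pkcs1())
    print(verify('hello', sig, pub.save_pkcs1()))  # True
    # Timed token round trip
    token = gen_token(secret_key='demo-secret', payload={'user': 'demo'}, expires=60)
    print(token_verify(token, secret_key='demo-secret'))  # {'user': 'demo'}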
|
AllenTools
|
/AllenTools-0.1.1-py3-none-any.whl/allentools/security.py
|
security.py
|
import json
import operator
import copy
import sys
import os
def _auto_update(func):
"""
Decorator that automatically writes changes back to the JSON file
"""
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
if self.auto_update \
and not operator.eq(self._info_bak, self.info):
self.update_()
self._info_bak = copy.deepcopy(self.info)
return wrapper
class JSONKeeper:
def __init__(self, json_path, data=None, auto_update=True, section=None, update=False):
"""
:param json_path: path of the JSON file
:param data: extra data; existing keys are not overwritten, missing keys are added
:param auto_update: automatically write the file after a value is modified
:param section: select a single section of the JSON document
:param update: whether to refresh existing keys with the values in data
"""
# self.config_path = self._relative_to_abs(json_path)
self.config_path = json_path
self.auto_update = auto_update
# print(path)
try:
with open(self.config_path, 'r') as f:
self.info = json.load(f)
except FileNotFoundError:
print('create {} in {}'.format(self.config_path, os.getcwd()))
with open(self.config_path, 'w', encoding='utf-8') as f:
self.info = {}
json.dump(self.info, f, ensure_ascii=False, indent=4)
if section:
try:
self.info = self.info[section]
except KeyError:
raise ValueError('section {} is not defined'.format(section))
self._info_bak = copy.deepcopy(self.info)  # deep copy kept as a snapshot for change detection
if data:
if type(data) is list and type(self.info) is not list:
raise ValueError('can not append a list to json')
for i in data:
if not self.info.get(i) \
or self.info.get(i) and update:
self[i] = data[i]
@staticmethod
def _relative_to_abs(path):
filename = sys._getframe(2).f_code.co_filename  # protected method; the external caller sits at index 2 of the stack
file_dir = os.path.split(os.path.abspath(filename))[0]  # resolve relative to the caller's directory
if path[0] not in ('/', '\\'):  # handle files in the same directory
path = os.sep + path.replace('/', '\\')
path = file_dir + path
return path
def update_(self):
with open(self.config_path, 'w') as f:
json.dump(self.info, f, indent=4)
def __str__(self):
return str(self.info)
def keys(self):
return self.info.keys()
def values(self):
return self.info.values()
@_auto_update
def __setitem__(self, key, value):
self.info[key] = value
def __getitem__(self, item):
return self.info[item]
def find(self, key, default=None, return_list=True):
"""
Look up the value of a key
Using this method guarantees that no exception is raised when the data structure changes
:param return_list:
:param key:
:param default:
:return: the value if found, otherwise default
"""
value = self._find(self.info, key)
if value is None:
return default
return value
def _find(self, dic: dict, key):
"""
Recursive helper for find
:param dic:
:param key:
:return:
"""
value = dic.get(key)
if value is None:
    for d in dic.values():
        if type(d) is dict:
            nested = self._find(d, key)
            if nested is not None:
                return nested
return value
def set(self, pos, value):
"""
Insert or overwrite a value at the given position
A pos of the form
layer1.layer2.layer3
results in
this[layer1][layer2][layer3] = value
"""
pos = pos.split('.')
if len(pos) == 1:
self[pos[0]] = value
return
temp = value
for i in range(1, len(pos)):
temp = {pos[-i]: temp}
self[pos[0]] = {**(self.get(pos[0]) or {}), **temp}  # python>3.5 required
def get(self, key, default=None):
return self.info.get(key, default)
class Config:
def to_json(self, indent=None):
s = {}
for i in self.__dir__():
if not hasattr(self.__getattribute__(i), '__call__') and i[0] != '_' and i[-1] != '_':
s[i] = self.__getattribute__(i)
return json.dumps(s, indent=indent)
def get(self, k, default=None):
if k.upper() in self.__dir__():
return self.__getattribute__(k.upper())
return default
def __setitem__(self, key, value):
self.__setattr__(key.upper(), value)
def __getitem__(self, item):
return self.__getattribute__(item.upper())
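# A minimal usage sketch of JSONKeeper (the file name is illustrative; the JSON file
# is created in the current working directory if it does not already exist):
if __name__ == '__main__':
    keeper = JSONKeeper('demo_settings.json', data={'host': 'localhost', 'db': {}})
    keeper['port'] = 8080                      # auto_update writes the change to disk
    keeper.set('db.credentials.user', 'demo')  # nested set: info['db']['credentials']['user']
    print(keeper.find('user'))                 # recursive lookup -> 'demo'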
|
AllenTools
|
/AllenTools-0.1.1-py3-none-any.whl/allentools/config.py
|
config.py
|
# AllyInvest.py
A blackbox Ally Invest/TradeKing API interface for application developers.
AllyInvest.py is a part of the PyPi repository and can be installed via
```
pip install AllyInvestPy
```
AllyAPI.py
A Python3 class that allows access to all of the functionality in the
Ally/TradeKing API.
This package attempts to stay on top of changes to the API and provide an
easy-to-use interface to the Ally Invest API. The API does no formatting
for the user. A response format of 'xml' or 'json' can be specified and
the API responses will be returned as the raw XML or JSON, respectively.
This API was built with the developer in mind and should allow a developer
to build applications around the Ally Invest API without having to deal with
accessing and managing the requests and responses.
# Documentation
Doxygen was used to generate documentation for this interface. The generated
documentation can be found [here](http://www.anthonymorast.com/allyinvestapi/).
Perhaps the most useful documentation is of the [AllyAPI class](http://www.anthonymorast.com/allyinvestapi/classally_1_1_ally_a_p_i.html)
as this documentation shows which functionality is available and describes how to
use each function.
# Usage
Details coming soon. Some basic usage can be found in example.py until then.
# TODO
+ Documentation
+ URLs.py and examples.py
+ Implement missing functionality
+ Right now, the API implements many of the calls listed on [Ally's documentation page](https://www.ally.com/api/invest/documentation/)
but there are many not yet implemented (due to time constraints). Below is a list.
+ Adding the functionality is pretty straight forward, some more details are below.
+ Test
+ Everything
+ Add unit tests
## Adding New API Functionality
To add a new API function the API URL has to be added to the URLs class in URLs.py.
Note that the request endpoint is already stored in the class e.g.
> https://api.tradeking.com/v1/
Therefore, only the portion after */v1/* needs to be added as a URL. A method to
obtain the full URL must be implemented as well, examples abound in the URLs class.
After the URL is added, implementing the POST or GET is very simple. There are two
private methods in the *AllyAPI* class that allow easily retrieving data provided
only the URL. These are *__get_data(self, url)* and *__to_format(self, response)*.
To add the new functionality, just create a method call in the *AllyAPI* class that
uses your new URL and returns/calls the *__get_data(...)* method. This will return the
raw XML or JSON response to the user, depending on the format set up when creating
the *AllyAPI* class instance.
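As a rough sketch of those steps (the `self.url` attribute holding the `URLs` instance and the
method names below are assumptions for illustration, not taken from the source), wiring up the
currently missing GET market/timesales call might look like this:
```python
# URLs.py, inside URLs.__init__(), following the existing pattern
self.timesales = "market/timesales.{format}".format(format=self.format)
# URLs.py, a new accessor method on the URLs class
def timesales_url(self):
    """Combines the request endpoint and market timesales API URLs."""
    return self.base_url + self.timesales
# AllyAPI, expose the new call; __get_data() returns the raw XML or JSON
# response in the format chosen when the AllyAPI instance was created
def get_market_timesales(self):
    return self.__get_data(self.url.timesales_url())
```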
## Missing Functionality
+ MARKET
+ GET market/options/search
+ GET market/options/strikes
+ GET market/options/expirations
+ GET market/timesales
+ WATCHLIST
+ GET watchlists
+ POST watchlists
+ GET watchlists/:id
+ DELETE watchlists/:id
+ POST watchlists/:id/symbols
+ DELETE watchlists/:id/symbols
+ STREAMING OPERATIONS
+ MARKET
+ GET market/quotes
|
AllyInvestPy
|
/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/README.md
|
README.md
|
class URLs:
""" The URLs class will handle all of the URLs. The purpose of this class is
to essentially store and serve all of the URL strings useful to the
Ally Invest API.
There is no processing of the URLs or the URL parameters done in this class
all of that logic is handled in the AllyAPI class.
"""
def __init__(self, response_format="json"):
"""The URLs class constructor which defines all of the URLs used by the API.
When adding new API functionality the URL needs to be added here.
Examples abound of the format used by this implementation of the API.
@param self - the object pointer
@param response_format - format of the response. Valid values are 'xml' and 'json'.
Specifying 'xml' will return an ElementTree containing the response XML while
'json' will return the response in the JSON format.
"""
self.format = response_format
self.base_url = "https://api.tradeking.com/v1/"
# self.request_token = "https://developers.tradeking.com/oauth/request_token"
# self.user_auth = "https://developers.tradeking.com/oauth/authorize"
# self.resource_owner_key = "https://developers.tradeking.com/oauth/resource_owner_key"
# account
self.accounts = "accounts.{format}".format(format=self.format)
self.accounts_balances = "accounts/balances.{format}".format(format=self.format)
self.account = "accounts/{id}.{format}".format(format=self.format, id="{id}")
self.account_balances = "accounts/{id}/balances.{format}".format(format=self.format, id="{id}")
self.account_history = "accounts/{id}/history.{format}".format(format=self.format, id="{id}")
self.account_holdings = "accounts/{id}/holdings.{format}".format(format=self.format, id="{id}")
# orders
self.orders = "accounts/{id}/orders.{format}".format(format=self.format, id="{id}")
self.order_preview = "accounts/{id}/orders/preview.{format}".format(format=self.format, id="{id}")
self.post_order = "accounts/{id}/orders.{format}".format(format="xml", id="{id}")
# market
self.clock = "market/clock.{format}".format(format=self.format)
self.quote = "market/ext/quotes.{format}".format(format=self.format)
self.news_search = "/market/news/search.{format}".format(format=self.format)
self.news_article = "market/news/{article_id}.{format}".format(format=self.format, article_id="{article_id}")
self.toplists = "market/toplists/{listtype}.{format}".format(format=self.format, listtype="{listtype}")
self.options_search = "market/options/search.{format}".format(format=self.format)
self.options_strikes = "market/options/strikes.{format}".format(format=self.format)
self.options_exps = "market/options/expirations.{format}".format(format=self.format)
# member
self.member_profile = "member/profile.{format}".format(format=self.format)
# Utilities
self.status = "utility/status.{format}".format(format=self.format)
self.version = "utility/version.{format}".format(format=self.format)
# watchlists
self.watchlists = "watchlists.{format}".format(format=self.format)
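# Note: the string assigned to self.base_url in __init__ shadows the base_url() method
# below on instances; the other URL helpers read self.base_url as the endpoint string.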
def base_url(self):
"""Returns the API request endpoint.
@param self - the object pointer
"""
return self.base_url
# def request_token(self):
# """
# @param self - the object pointer
# """
# return self.request_token
#
# def user_auth(self):
# """
# @param self - the object pointer
# """
# return self.user_auth
#
# def resource_owner_key(self):
# """
# @param self - the object pointer
# """
# return self.resource_owner_key
"""
Accounts
"""
def accounts_url(self):
"""Combines the request endpoint and accounts API URLs
@param self - the object pointer
"""
return self.base_url + self.accounts
def accounts_balances_url(self):
"""Combines the request endpoint and accounts balances API URLs
@param self - the object pointer
"""
return self.base_url + self.accounts_balances
def account_url(self):
"""Combines the request endpoint and account API URLs
@param self - the object pointer
"""
return self.base_url + self.account
def account_balances_url(self):
"""Combines the request endpoint and account balances API URLs
@param self - the object pointer
"""
return self.base_url + self.account_balances
def account_history_url(self):
"""Combines the request endpoint and account history API URLs
@param self - the object pointer
"""
return self.base_url + self.account_history
def account_holdings_url(self):
"""Combines the request endpoint and account holding API URLs
@param self - the object pointer
"""
return self.base_url + self.account_holdings
"""
ORDER/TRADE
TODO:
GET accounts/:id/orders
POST accounts/:id/orders
POST accounts/:id/orders/preview
"""
def get_orders(self):
return self.base_url + self.orders
def post_order_preview(self):
return self.base_url + self.order_preview
def get_post_order(self):
return self.base_url + self.post_order
"""
Market
TODO:
GET market/options/search
GET market/options/strikes
GET market/options/expirations
GET market/timesales
"""
def clock_url(self):
"""Combines the request endpoint and market clock API URLs
@param self - the object pointer
"""
return self.base_url + self.clock
def quote_url(self):
"""Combines the request endpoint and quote API URLs
@param self - the object pointer
"""
return self.base_url + self.quote
def news_search_url(self):
"""Combines the request endpoint and news search API URLs
@param self - the object pointer
"""
return self.base_url + self.news_search
def news_article_url(self):
"""Combines the request endpoint and news article API URLs
@param self - the object pointer
"""
return self.base_url + self.news_article
def toplists_url(self):
"""Combines the request endpoint and toplists API URLs
@param self - the object pointer
"""
return self.base_url + self.toplists
def options_search_url(self):
return self.base_url + self.options_search
def options_strikes_url(self):
return self.base_url + self.options_strikes
def options_exps_url(self):
return self.base_url + self.options_exps
"""
Member
"""
def member_profile_url(self):
"""Combines the request endpoint and member profile API URLs
@param self - the object pointer
"""
return self.base_url + self.member_profile
"""
Utilities
"""
def status_url(self):
"""Combines the request endpoint and server status API URLs
@param self - the object pointer
"""
return self.base_url + self.status
def version_url(self):
"""Combines the request endpoint and API version API URLs
@param self - the object pointer
"""
return self.base_url + self.version
"""
WATCHLIST
TODO:
GET watchlists/:id
DELETE watchlists/:id
POST watchlists/:id/symbols
DELETE watchlists/:id/symbols
"""
def get_watchlists_url(self):
"""Combines the request endpoint and watchlist get URLs. Note this is the
same URL as the POST call (URLs.post_watchlist_url()).
"""
return self.base_url + self.watchlists
def post_watchlist_url(self):
"""Combines the request endpoint and watchlist post URLs"""
return self.base_url + self.watchlists
"""
STREAMING OPERATIONS
MARKET
TODO:
GET market/quotes
"""
|
AllyInvestPy
|
/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/ally/URLs.py
|
URLs.py
|
from requests_oauthlib import OAuth1
from xml.etree import ElementTree
import datetime
import requests
import json
from collections import defaultdict
import re
import copy
from .URLs import URLs
class AllyAPI:
"""The AllyAPI class providing blackbox use of the Ally Invest API.
This is the main class of the API module. This should be the only class used in
    applications built around the API. The AllyAPI class allows access to the GET and
POST requests supported by Ally Invest.
Missing Functionality:
MARKET
GET market/options/search
GET market/options/strikes
GET market/options/expirations
GET market/timesales
WATCHLIST
GET watchlists/:id
DELETE watchlists/:id
POST watchlists/:id/symbols
DELETE watchlists/:id/symbols
STREAMING OPERATIONS
MARKET
GET market/quotes
"""
def __init__(self, oauth_secret, oauth_token, client_key,
response_format="json"):
"""AllyAPI constructor. Sets the response format on all of the URLs and
the oauth/client keys required to access the API.
Parameters
@param self - the object pointer
@param oauth_secret - secret oauth key from Ally
@param oauth_token - oauth token from Ally
@param client_key - client key from Ally
@param response_format - format of the response. Valid values are 'xml' and 'json'.
Specifying 'xml' will return an ElementTree containing the response XML while
'json' will return the response in the JSON format.
"""
self.format = response_format
self.url = URLs(response_format=response_format)
self.oauth_secret = oauth_secret
self.oauth_token = oauth_token
self.client_key = client_key
self.client_secret = client_key
self.auth_time = None
self.auth = None
self.valid_auth_dt = datetime.timedelta(seconds=10)
def __create_auth(self):
"""A private method to create the OAuth1 object, if necessary."""
now = datetime.datetime.now()
        if self.auth is None or self.auth_time + self.valid_auth_dt < now:
self.auth_time = now
self.auth = OAuth1(self.client_key, self.client_secret, self.oauth_token,
self.oauth_secret, signature_type='auth_header')
def __get_symbol_string(self, symbols):
"""Returns a string that is either a single quote or a comma-separated
list of quotes depending on the type of quotes.
@param self - the object pointer
@param symbols - single ticker or list of ticker to get quotes for
"""
if not isinstance(symbols, str): # list
symbols = ",".join(symbols)
return symbols
def __convert_fixml_json(self, json_data):
"""Takes the order data and converts it to a consistent format.
The FIXML message is also expanded, with the original intact.
@param self - the object pointer
@param json_data - original data to be converted.
"""
# If there's no orders, there's nothing to do.
if not json_data["response"]["orderstatus"]["order"]:
return json_data
# Copy the data to keep from overwriting the input.
data = copy.deepcopy(json_data)
# A single order will be a dict, and multiple a list.
# Convert order to always be a list of dicts.
if isinstance(data["response"]["orderstatus"]["order"], dict):
data["response"]["orderstatus"]["order"] = \
[data["response"]["orderstatus"]["order"]]
# Convert the FIXML message in each order.
# Add the keys to order itself, but preserve fixmlmessage.
for order in data["response"]["orderstatus"]["order"]:
order_xml = ElementTree.fromstring(order["fixmlmessage"])
order.update(self.__fixml_to_dict(order_xml))
# Return the converted data.
return data
def __convert_fixml_xml(self, xml_data):
"""Takes the order data and expands the FIXML message.
The original message is left intact.
@param self - the object pointer
@param xml_data - original data to be converted.
"""
# Register the FIXML namespace.
ElementTree.register_namespace("", "http://www.fixprotocol.org/FIXML-5-0-SP2")
# Copy the data to keep from overwriting the input.
data = copy.deepcopy(xml_data)
# Each order will have a "fixmlmessage" to convert.
for order in data.find("orderstatus").findall("order"):
fixml_text = order.find("fixmlmessage").text
fixml = ElementTree.fromstring(fixml_text)
order.append(fixml)
# Return the converted data.
return data
def __fixml_to_dict(self, fixml):
"""Recursively convert FIXML to a dictionary.
@param self - the object pointer
@param fixml - FIXML Element to be converted.
"""
# Remove the Namespace from the tag.
tag = re.sub(r"\{[^}]*\} *", "", fixml.tag)
# Establish the final dictionary to return.
ret = {tag: {} if fixml.attrib else None}
# Recursively convert each subelement.
# Each subelement becomes a tag key with a dict value.
children = list(fixml)
if children:
defdict = defaultdict(list)
for childdict in map(self.__fixml_to_dict, children):
for key, val in childdict.items():
defdict[key].append(val)
ret = {tag: {k: v[0] if len(v) == 1 else v
for k, v in defdict.items()}}
# Set each attribute as a tag key.
if fixml.attrib:
ret[tag].update(("@" + k, v)
for k, v in fixml.attrib.items())
# Set the value of each attribute key to the text.
if fixml.text:
text = fixml.text.strip()
if children or fixml.attrib:
if text:
ret[tag]["#text"] = text
else:
ret[tag] = text
# Return the final dictionary.
return ret
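    # Illustrative example: a leaf element such as
    #   <Instrmt SecTyp="CS" Sym="AAPL"/>
    # becomes {"Instrmt": {"@SecTyp": "CS", "@Sym": "AAPL"}} once the
    # namespace has been stripped from its tag.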
def __get_fixml(self, ticker, amount, type, account, side, tif, price, sectype):
fixml = "<FIXML xmlns=\"http://www.fixprotocol.org/FIXML-5-0-SP2\">"
fixml += "<Order"
if type != ORDER_TYPE.MARKET and tif is not None:
fixml += " TmInForce=\"{}\"".format(tif)
if type != ORDER_TYPE.MARKET:
fixml += " Px=\"{}\"".format(price)
fixml += " Typ=\"{}\" Side=\"{}\" Acct=\"{}\">".format(type, side, account)
fixml += "<Instrmt SecTyp=\"{}\" Sym=\"{}\"/>".format(sectype, ticker)
fixml += "<OrdQty Qty=\"{}\"/></Order></FIXML>".format(amount)
return fixml
def __to_format(self, response, xml=False):
"""A private method to return the API response in the desired format
@param self - the object pointer
@param response - response from the Ally Invest API
"""
if response.status_code != 200:
if response.status_code == 429:
print("Too many requests.")
exit()
elif response.status_code == 414:
print("URI too long, please chunk ticker symbols.")
exit()
if self.format == "json" and not xml:
return response.json()
else:
return ElementTree.fromstring(response.content)
def __get_data(self, url):
"""A private method to return the requested data in the requested format
for a given URL.
@param self - the object pointer
@param url - API URL to access
"""
self.__create_auth()
return self.__to_format(requests.get(url, auth=self.auth))
def __submit_post(self, url, data, headers={}, usexml=False):
"""A private method to submit a post request to the Ally Invest server
@param self - the object pointer
@param url - API URL to access
@param data - payload for the HTTP request
"""
self.__create_auth()
res = requests.post(url, headers=headers, data=data, auth=self.auth)
return self.__to_format(res, usexml)
def get_accounts(self):
"""Returns all of the user's accounts."""
return self.__get_data(self.url.accounts_url())
def get_accounts_balances(self):
"""Returns the balances of all of the user's accounts."""
return self.__get_data(self.url.accounts_balances_url())
def get_account(self, id):
"""Returns a specific account provided the account ID (account number)
@param self - the object pointer
@param id - account number
"""
return self.__get_data(self.url.account_url().format(id=str(id)))
def get_account_balances(self, id):
"""Returns the balances of a specific account (ID = account number)
@param self - the object pointer
@param id - account number
"""
return self.__get_data(self.url.account_balances_url().format(id=str(id)))
def get_account_history(self, id):
"""Returns the history of a specific account (ID = account number)
@param self - the object pointer
@param id - account number
"""
return self.__get_data(self.url.account_history_url().format(id=str(id)))
def get_account_holdings(self, id):
"""Returns the holdings of a specific account (ID = account number)
@param self - the object pointer
@param id - account number
"""
return self.__get_data(self.url.account_holdings_url().format(id=str(id)))
def get_orders(self, id):
"""Returns the orders of a specific account (ID = account number)
@param self - the object pointer
@param id - account number
"""
data = self.__get_data(self.url.get_orders().format(id=str(id)))
if self.format == "json":
return self.__convert_fixml_json(data)
return self.__convert_fixml_xml(data)
def post_order(self, id, fixml):
"""Posts an order and returns the response.
@param self - the object pointer
@param id - account number
@param fixml - FIXML string to send.
"""
headers = {
'TKI_OVERRIDE': 'true',
'Content-Type': 'application/xml',
}
# The GET and POST have the same URL.
url = self.url.get_orders().format(id=str(id))
return self.__submit_post(url, fixml, headers,
self.format=='xml')
def post_order_preview(self, id, fixml):
"""Posts an order for preview and returns the response.
@param self - the object pointer
@param id - account number
@param fixml - FIXML string to send.
"""
headers = {
'TKI_OVERRIDE': 'true',
'Content-Type': 'application/xml',
}
url = self.url.post_order_preview().format(id=str(id))
return self.__submit_post(url, fixml, headers,
self.format=='xml')
def get_market_clock(self):
"""Returns the state of the market, the time until next state change,
and current server timestamp.
@param self - the object pointer
"""
return self.__get_data(self.url.clock_url())
def get_quote(self, symbols):
"""Returns quote information for a single ticker or list of tickers.
Note: this function does not implement selecting customer FIDs as
described in the API documentation. These can be filtered from the return
if need be.
@param self - the object pointer
@param symbols - single ticker or list of ticker to get quotes for
"""
url = self.url.quote_url()+"?symbols={symbols}"
symbols = self.__get_symbol_string(symbols)
return self.__get_data(url.format(symbols=symbols))
def __get_option_quote_symbol(self, symbol, exp_date, strike, put_call):
sym = "{sym}{year}{month:02d}{day:02d}{putcall}{strike}"
        strike = str(int(round(strike * 1000))).zfill(8)
return sym.format(sym=symbol.upper(), year=str(exp_date.year)[-2:], month=exp_date.month,
day=exp_date.day, putcall=put_call.upper(), strike=strike)
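    # Illustrative example: ("AAPL", datetime(2024, 1, 19), 195.0, "c")
    # produces the option symbol string "AAPL240119C00195000".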
def get_option_quote(self, symbol, expiration_date, strike_price, put_call):
"""Returns a quote for an option for the symbol, expiration date, strike price
and put/call specifier.
@param self - object pointer
@param symbol - underlying stock's ticker symbol
@param expiration_date - options expiration date
@param strike_price - option's strike price
@param put_call - c=call, p=put
"""
url = self.url.quote_url() + "?symbols={sym}"
if isinstance(symbol, str): # single ticker
if not isinstance(expiration_date, datetime.datetime):
print("In 'get_option_quote': datetime.datetime expected for expiration date.")
return None
sym = self.__get_option_quote_symbol(symbol, expiration_date, strike_price, put_call)
elif isinstance(symbol, list) and isinstance(expiration_date, list) \
and isinstance(strike_price, list) and isinstance(put_call, list):
if not isinstance(expiration_date[0], datetime.datetime):
print("In 'get_option_quote': datetime.datetime expected for expiration date.")
return None
request_sym = []
for i in range(len(symbol)):
request_sym.append(self.__get_option_quote_symbol(symbol[i], expiration_date[i],
strike_price[i], put_call[i]))
sym = self.__get_symbol_string(request_sym)
else:
print("In 'get_option_quote': symbol, expiration_date, strike_price, and put_call \
must all be single values or lists.")
return None
return self.__get_data(url.format(sym=sym))
def news_search(self, symbols, startdate=None, enddate=None, maxhits=10):
"""Retrieves a listing of news headlines based on symbols.
@param self - the object pointer
@param symbols - single ticker or list of ticker to get quotes for
@param startdate - search for articles between this date and enddate
@param enddate - search for articles between this date and startdate
@param maxhits - number of articles to return
"""
if startdate is None or enddate is None:
print("news_search: either enddate or startdate is not specified, ignoring both.")
if (startdate is not None and enddate is not None) and (enddate < startdate):
print("news_search: start date is after end date.")
raise Exception("Start date is after end date in news search.")
url = self.url.news_search_url() + "?symbols={syms}&maxhits={mxhits}".format(mxhits=maxhits, syms="{syms}")
if startdate is not None and enddate is not None:
url += "&startdate={sdate}&enddate={edate}" \
.format(sdate=startdate.strftime("%m/%d/%Y"), edate=enddate.strftime("%m/%d/%Y"))
return self.__get_data(url.format(syms=self.__get_symbol_string(symbols)))
def get_news_article(self, article_id):
"""Gets a single news article based on the article ID. This ID can be retrieved
from the news_search() function.
@param self - the object pointer
@param article_id - ID of the article to retrieve
"""
return self.__get_data(self.url.news_article_url().format(article_id=article_id))
def get_toplists(self, listtype="topgainers", exchange="N"):
"""Returns a ranked list depending on listtype and exchange.
@param listtype - type of list to be queried, accepted values are:
'toplosers': top losers by dollar amount
'toppctlosers': top percentage losers
'topvolume': top volume
'topactive': top active
'topgainers': top gainers by dollar amount (default)
'toppctgainers': top percentage gainers
@param exchange - exchange to be queried, accepted values are:
'A': American Stock Exchange
'N': New York Stock Exchange (default)
'Q': NASDAQ
'U': NASDAQ Bulletin Board
'V': NASDAQ OTC Other
"""
url = self.url.toplists_url().format(listtype=listtype)
url += "?exchange={ex}".format(ex=exchange)
return self.__get_data(url)
def get_options(self, symbol):
url = self.url.options_search_url() + ("?symbol={}".format(symbol))
return self.__get_data(url)
def get_options_strikes(self, symbol):
url = self.url.options_strikes_url() + ("?symbol={}".format(symbol))
return self.__get_data(url)
def get_options_expirations(self, symbol):
url = self.url.options_exps_url() + ("?symbol={}".format(symbol))
return self.__get_data(url)
def get_member_profile(self):
"""Returns general information associated with the user including account
numbers and account information.
@param self - the object pointer
"""
return self.__get_data(self.url.member_profile_url())
def get_status(self):
"""Returns an error if the API endpoint/server is unavailable. Otherwise
returns the current server timestamp.
@param self - the object pointer
"""
return self.__get_data(self.url.status_url())
def get_version(self):
"""Gets the current version of the API of the endpoint called.
@param self - the object pointer
"""
return self.__get_data(self.url.version_url())
def get_watchlists(self):
"""Retrieves all watchlists belonging to the member.
@param self - the object pointer
"""
return self.__get_data(self.url.get_watchlists_url())
def create_watchlist(self, watchlist_name, symbols=""):
"""Creates a watchlist and adds a symbol or list of symbols to a watchlist.
WARNING: There appears to be an issue when adding a list of symbols.
It is recommended that one ticker symbol is added at a time.
@param self - the object pointer
        @param watchlist_name - name of the watchlist
@param symbols - single ticker or list of tickers to add to the watchlist
"""
print("WARNING create_watchlist(): There appears to be an issue when adding a list of symbols. It is recommended that one ticker symbol is added at a time.")
payload = {"id": watchlist_name}
if not symbols == "":
payload["symbols"] = symbols
return self.__submit_post(self.url.post_watchlist_url(), payload)
def order_common_stock(self, ticker, shares, type, account_nbr, side,
time_in_force=None, price=None):
"""Creates an order for common stock (as opposed to options).
@param self - object pointer
@param ticker - ticker symbol of the security to purchase
@param shares - the number of shares to purchase
@param type - the order type: Market, Limit, Stop, or Stop Limit
- use the provided enum for these values
@param account_nbr - the account number for which the shares are to be purchased
@param side - the side of the trade: Buy or Sell
- use the provided enum for these values
@param time_in_force - not applicable for market orders: Day Order, Good til Cancelled, Market on Close
- use the provided enum for these values
@param price - the price to purchase the security (only for limit and stop limit orders)
"""
        if price is None and type != ORDER_TYPE.MARKET:
            raise ValueError("Price is required for non-market order types.")
payload = self.__get_fixml(ticker, shares, type, account_nbr, side, time_in_force, price, "CS")
headers = {
'TKI_OVERRIDE': 'true',
'Content-Type': 'application/xml',
}
url = self.url.get_post_order().format(id=account_nbr)
return self.__submit_post(url, payload, headers, True)
class TIME_IN_FORCE:
DAY = "0"
GTC = "1"
MARKET_ON_CLOSE = "7"
class ORDER_TYPE:
MARKET = "1"
LIMIT = "2"
STOP = "3"
STOP_LIMIT = "4"
class SIDE:
BUY = "1"
SELL = "2"
SELL_SHORT = "5"
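# Illustrative sketch (not part of the original module): placing a limit order
# for common stock using the enums defined above. The credentials and account
# number are placeholders.
if __name__ == "__main__":
    api = AllyAPI("oauth-secret", "oauth-token", "client-key")
    response = api.order_common_stock(
        ticker="AAPL",
        shares=1,
        type=ORDER_TYPE.LIMIT,
        account_nbr="12345678",
        side=SIDE.BUY,
        time_in_force=TIME_IN_FORCE.DAY,
        price=150.00,
    )
    print(response)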
|
AllyInvestPy
|
/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/ally/ally.py
|
ally.py
|
class Quote():
def __init__(self):
pass
def from_xml(self, xml):
pass
def from_json(self, json):
if 'adp_100' in json:
self.adp_100 = json['adp_100']
if 'adp_200' in json:
self.adp_200 = json['adp_200']
if 'adp_50' in json:
self.adp_50 = json['adp_50']
if 'adv_21' in json:
self.adv_21 = json['adv_21']
if 'adv_30' in json:
self.adv_30 = json['adv_30']
if 'adv_90' in json:
self.adv_90 = json['adv_90']
if 'ask' in json:
self.ask = json['ask']
if 'ask_time' in json:
self.ask_time = json['ask_time']
if 'asksz' in json:
self.asksz = json['asksz']
if 'basis' in json:
self.basis = json['basis']
if 'beta' in json:
self.beta = json['beta']
if 'bid' in json:
self.bid = json['bid']
if 'bid_time' in json:
self.bid_time = json['bid_time']
if 'bidsz' in json:
self.bidsz = json['bidsz']
if 'bidtick' in json:
self.bidtick = json['bidtick']
if 'chg' in json:
self.chg = json['chg']
if 'chg_sign' in json:
self.chg_sign = json['chg_sign']
if 'chg_t' in json:
self.chg_t = json['chg_t']
if 'cl' in json:
self.cl = json['cl']
if 'contract_size' in json:
self.contract_size = json['contract_size']
if 'cusip' in json:
self.cusip = json['cusip']
if 'date' in json:
self.date = json['date']
if 'datetime' in json:
self.datetime = json['datetime']
if 'days_to_expiration' in json:
self.days_to_expiration = json['days_to_expiration']
if 'div' in json:
self.div = json['div']
if 'divexdate' in json:
self.divexdate = json['divexdate']
if 'divfreq' in json:
self.divfreq = json['divfreq']
if 'divpaydt' in json:
self.divpaydt = json['divpaydt']
if 'dollar_value' in json:
self.dollar_value = json['dollar_value']
if 'eps' in json:
self.eps = json['eps']
if 'exch' in json:
self.exch = json['exch']
if 'exch_desc' in json:
self.exch_desc = json['exch_desc']
if 'hi' in json:
self.hi = json['hi']
if 'iad' in json:
self.iad = json['iad']
if 'idelta' in json:
self.idelta = json['idelta']
if 'igamma' in json:
self.igamma = json['igamma']
if 'imp_volatility' in json:
self.imp_volatility = json['imp_volatility']
if 'incr_vl' in json:
self.incr_vl = json['incr_vl']
if 'irho' in json:
self.irho = json['irho']
if 'issue_desc' in json:
self.issue_desc = json['issue_desc']
if 'itheta' in json:
self.itheta = json['itheta']
if 'ivega' in json:
self.ivega = json['ivega']
if 'last' in json:
self.last = json['last']
if 'lo' in json:
self.lo = json['lo']
if 'name' in json:
self.name = json['name']
if 'op_delivery' in json:
self.op_delivery = json['op_delivery']
if 'op_flag' in json:
self.op_flag = json['op_flag']
if 'op_style' in json:
self.op_style = json['op_style']
if 'op_subclass' in json:
self.op_subclass = json['op_subclass']
if 'openinterest' in json:
self.openinterest = json['openinterest']
if 'opn' in json:
self.opn = json['opn']
if 'opt_val' in json:
self.opt_val = json['opt_val']
if 'pchg' in json:
self.pchg = json['pchg']
if 'pchg_sign' in json:
self.pchg_sign = json['pchg_sign']
if 'pcls' in json:
self.pcls = json['pcls']
if 'pe' in json:
self.pe = json['pe']
if 'phi' in json:
self.phi = json['phi']
if 'plo' in json:
self.plo = json['plo']
if 'popn' in json:
self.popn = json['popn']
if 'pr_adp_100' in json:
self.pr_adp_100 = json['pr_adp_100']
if 'pr_adp_200' in json:
self.pr_adp_200 = json['pr_adp_200']
if 'pr_adp_50' in json:
self.pr_adp_50 = json['pr_adp_50']
if 'pr_date' in json:
self.pr_date = json['pr_date']
if 'pr_openinterest' in json:
self.pr_openinterest = json['pr_openinterest']
if 'prbook' in json:
self.prbook = json['prbook']
if 'prchg' in json:
self.prchg = json['prchg']
if 'prem_mult' in json:
self.prem_mult = json['prem_mult']
if 'put_call' in json:
self.put_call = json['put_call']
if 'pvol' in json:
self.pvol = json['pvol']
if 'qcond' in json:
self.qcond = json['qcond']
if 'rootsymbol' in json:
self.rootsymbol = json['rootsymbol']
if 'secclass' in json:
self.secclass = json['secclass']
if 'sesn' in json:
self.sesn = json['sesn']
if 'sho' in json:
self.sho = json['sho']
if 'strikeprice' in json:
self.strikeprice = json['strikeprice']
if 'symbol' in json:
self.symbol = json['symbol']
if 'tcond' in json:
self.tcond = json['tcond']
if 'timestamp' in json:
self.timestamp = json['timestamp']
if 'tr_num' in json:
self.tr_num = json['tr_num']
if 'tradetick' in json:
self.tradetick = json['tradetick']
if 'trend' in json:
self.trend = json['trend']
if 'under_cusip' in json:
self.under_cusip = json['under_cusip']
if 'undersymbol' in json:
self.undersymbol = json['undersymbol']
if 'vl' in json:
self.vl = json['vl']
if 'volatility12' in json:
self.volatility12 = json['volatility12']
if 'vwap' in json:
self.vwap = json['vwap']
if 'wk52hi' in json:
self.wk52hi = json['wk52hi']
if 'wk52hidate' in json:
self.wk52hidate = json['wk52hidate']
if 'wk52lo' in json:
self.wk52lo = json['wk52lo']
if 'wk52lodate' in json:
self.wk52lodate = json['wk52lodate']
if 'xdate' in json:
self.xdate = json['xdate']
if 'xday' in json:
self.xday = json['xday']
if 'xmonth' in json:
self.xmonth = json['xmonth']
if 'xyear' in json:
self.xyear = json['xyear']
if 'yield' in json:
self.yld = json['yield']
|
AllyInvestPy
|
/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/ally/responses/quote.py
|
quote.py
|
from xml.etree import ElementTree
def get_class_vars(class_type):
return [v for k, v in vars(class_type).items() if \
not k.startswith('_')]
class Order():
def __init__(self, **data):
# From Request Object.
self.acct = data.get('acct')
# Stock Symbol.
self.sym = data.get('sym')
        # Number of Shares. Default to None so the attribute always exists.
        self.qty = str(int(data.get('qty'))) if data.get('qty') else None
# use class SECURITY_TYPE
self.sec_typ = str(data.get('sec_typ', ''))
# use class SIDE
self.side = str(data.get('side', ''))
# use class ORDER_TYPE
self.typ = str(data.get('typ', ''))
# For Changes and Cancels
self.ord_id = data.get('ord_id')
# For Buying to Cover a Short Position.
self.acct_typ = data.get('acct_typ')
# For Limit Orders
# Limit Price.
self.px = f"{data.get('px', 0.0):.2f}"
# use class TIME_IN_FORCE
self.tm_in_force = str(data.get('tm_in_force', ''))
# For Options
# use class OPTION_POSITION
self.pos_efct = str(data.get('pos_efct', ''))
# Strike Price.
self.strk_px = f"{data.get('strk_px', 0.0):.2f}"
# use class OPTION_CLASS
self.cfi = str(data.get('cfi', ''))
# Date of Maturity.
self.mat_dt = data.get('mat_dt')
# Option Expiration.
self.mmy = data.get('mmy')
def validate(self):
"""Verify all required information is in the order.
"""
# Account must exist.
assert self.acct
# Symbol must exist.
assert self.sym
# Quantity must exist and be an integer greater than zero.
# Partials are sold when # shares held is less than one.
assert int(self.qty) > 0
# Order type must exist and be in ORDER_TYPE.
assert self.typ
assert self.typ in get_class_vars(ORDER_TYPE)
# Side must exist and be in SIDE.
assert self.side
assert self.side in get_class_vars(SIDE)
# Security type must exist and be in SECURITY_TYPE.
assert self.sec_typ
assert self.sec_typ in get_class_vars(SECURITY_TYPE)
# If Account Type is used, it must be in ACCOUNT_TYPE.
if self.acct_typ:
            assert self.acct_typ in get_class_vars(ACCOUNT_TYPE)
if self.typ != ORDER_TYPE.MARKET:
# Time in Force must exist and be in TIME_IN_FORCE.
assert self.tm_in_force
assert self.tm_in_force in get_class_vars(TIME_IN_FORCE)
if self.typ in [ORDER_TYPE.LIMIT, ORDER_TYPE.STOP_LIMIT]:
# Price must exist and be a float greater than zero.
assert self.px
assert float(self.px) > 0.0
if self.sec_typ == SECURITY_TYPE.OPTION:
# Position must exist and be in OPTION_POSITION.
assert self.pos_efct
assert self.pos_efct in get_class_vars(OPTION_POSITION)
# Strike Price must exist & be a float greater than zero.
assert self.strk_px
assert float(self.strk_px) > 0.0
# CFI must exist and be in OPTION_CLASS.
assert self.cfi
assert self.cfi in get_class_vars(OPTION_CLASS)
# Date of Maturity must exist.
assert self.mat_dt
# Option Expiration must exist.
assert self.mmy
return True
def from_xml(self, xml):
nsp = {'': 'http://www.fixprotocol.org/FIXML-5-0-SP2'}
exec_rpt = xml.find('FIXML', nsp).find('ExecRpt', nsp)
        if exec_rpt is None:
return
self.ord_id = exec_rpt.attrib.get('OrdID')
self.id = exec_rpt.attrib.get('ID')
self.stat = exec_rpt.attrib.get('Stat')
self.acct = exec_rpt.attrib.get('Acct')
self.acct_typ = exec_rpt.attrib.get('AcctTyp')
self.side = exec_rpt.attrib.get('Side')
self.typ = exec_rpt.attrib.get('Typ')
self.px = exec_rpt.attrib.get('Px')
self.tm_in_force = exec_rpt.attrib.get('TmInForce')
self.leaves_qty = exec_rpt.attrib.get('LeavesQty')
self.trd_dt = exec_rpt.attrib.get('TrdDt')
self.txn_tm = exec_rpt.attrib.get('TxnTm')
instrmt = exec_rpt.find('Instrmt', nsp)
        if instrmt is not None:  # explicit check: childless Elements are falsy
self.sym = instrmt.attrib.get('Sym')
self.sec_typ = instrmt.attrib.get('SecTyp')
self.desc = instrmt.attrib.get('Desc')
ordqty = exec_rpt.find('OrdQty', nsp)
        if ordqty is not None:
self.qty = ordqty.attrib.get('Qty')
comm = exec_rpt.find('Comm', nsp)
        if comm is not None:
self.comm = comm.attrib.get('Comm')
def from_json(self, json):
exec_rpt = json.get('FIXML', {}).get('ExecRpt')
if not exec_rpt:
return
self.ord_id = exec_rpt.get('@OrdID')
self.id = exec_rpt.get('@ID')
self.stat = exec_rpt.get('@Stat')
self.acct = exec_rpt.get('@Acct')
self.acct_typ = exec_rpt.get('@AcctTyp')
self.side = exec_rpt.get('@Side')
self.typ = exec_rpt.get('@Typ')
self.px = exec_rpt.get('@Px')
self.tm_in_force = exec_rpt.get('@TmInForce')
self.leaves_qty = exec_rpt.get('@LeavesQty')
self.trd_dt = exec_rpt.get('@TrdDt')
self.txn_tm = exec_rpt.get('@TxnTm')
instrmt = exec_rpt.get('Instrmt')
if instrmt:
self.sym = instrmt.get('@Sym')
self.sec_typ = instrmt.get('@SecTyp')
self.desc = instrmt.get('@Desc')
ordqty = exec_rpt.get('OrdQty')
if ordqty:
self.qty = ordqty.get('@Qty')
comm = exec_rpt.get('Comm')
if comm:
self.comm = comm.get('@Comm')
def to_fixml(self, cancel=False):
"""Convert the contents of the order to FIXML.
This is only for common stock and single-leg options.
@param self - the object pointer
@param cancel - should this order be cancelled only?
"""
nsp = {'xmlns': 'http://www.fixprotocol.org/FIXML-5-0-SP2'}
order_tag = "Order"
if self.ord_id and cancel:
order_tag = "OrdCxlReq"
elif self.ord_id:
order_tag = "OrdCxlRplcReq"
# Now Build the FIXML.
# First get the attributes always required.
base_xml = ElementTree.Element("FIXML", nsp)
order = ElementTree.SubElement(base_xml, order_tag)
order.set('Acct', self.acct)
order.set('Typ', self.typ)
order.set('Side', self.side)
instrmt = ElementTree.SubElement(order, 'Instrmt')
instrmt.set('SecTyp', self.sec_typ)
instrmt.set('Sym', self.sym)
ordqty = ElementTree.SubElement(order, 'OrdQty')
ordqty.set('Qty', self.qty)
# Add field-dependent attributes.
if self.ord_id:
order.set('OrigID', self.ord_id)
if self.side == SIDE.BUY and \
self.acct_typ == ACCOUNT_TYPE.SHORT:
order.set('AcctTyp', self.acct_typ)
if self.typ != ORDER_TYPE.MARKET:
order.set('TmInForce', self.tm_in_force)
if self.typ != ORDER_TYPE.STOP:
order.set('Px', self.px)
if self.sec_typ == SECURITY_TYPE.OPTION:
order.set('PosEfct', self.pos_efct)
instrmt.set('CFI', self.cfi)
instrmt.set('StrkPx', self.strk_px)
instrmt.set('MMY', self.mmy)
instrmt.set('MatDt', self.mat_dt.isoformat())
return base_xml
def get_multileg_fixml(orders, cancel=False):
"""Create FIXML for a multi-leg option chain using multiple orders.
@param orders - A list of the orders.
@param cancel - should this order be cancelled only?
"""
if not orders:
return None
# Validate all the orders have the same basic info.
orders[0].validate()
chk_dict = dict(ord_id=orders[0].ord_id,
typ=orders[0].typ,
tm_in_force=orders[0].tm_in_force,
px=orders[0].px,
acct=orders[0].acct,
sym=orders[0].sym,
)
for order in orders[1:]:
# Along with validation, all basic data must match.
order.validate()
assert order.ord_id == chk_dict['ord_id']
assert order.typ == chk_dict['typ']
assert order.tm_in_force == chk_dict['tm_in_force']
assert order.px == chk_dict['px']
assert order.acct == chk_dict['acct']
assert order.sym == chk_dict['sym']
# Set the Namespace and base tag name.
nsp = {'xmlns': 'http://www.fixprotocol.org/FIXML-5-0-SP2'}
order_tag = "NewOrdMLeg"
if chk_dict['ord_id'] and cancel:
order_tag = "OrdCxlReq"
elif chk_dict['ord_id']:
order_tag = "MLegOrdCxlRplc"
# Now Build the FIXML.
# First get the attributes always required.
base_xml = ElementTree.Element("FIXML", nsp)
mleg = ElementTree.SubElement(base_xml, order_tag)
mleg.set('Acct', str(chk_dict['acct']))
if chk_dict['ord_id']:
# For a replace or cancel, the original ID is needed.
mleg.set('OrigCIOrdID', chk_dict['ord_id'])
if chk_dict['ord_id'] and cancel:
# For a cancel, the FIXML is much simpler.
instrmt = ElementTree.SubElement(mleg, 'Instrmt')
        instrmt.set('SecTyp', SECURITY_TYPE.MULTI_LEG)
instrmt.set('Sym', chk_dict['sym'])
else:
# For all others, fill in the rest of the info for each leg.
mleg.set('OrdTyp', chk_dict['typ'])
if chk_dict['typ'] == ORDER_TYPE.LIMIT:
mleg.set('TmInForce', chk_dict['tm_in_force'])
mleg.set('Px', str(chk_dict['px']))
for order in orders:
# Cycle through each order and add it.
ord_el = ElementTree.SubElement(mleg, 'Ord')
ord_el.set('OrdQty', str(order.qty))
ord_el.set('PosEfct', str(order.pos_efct))
leg = ElementTree.SubElement(ord_el, 'Leg')
leg.set('Side', str(order.side))
leg.set('Strk', str(order.strk_px))
leg.set('Mat', str(order.mat_dt.isoformat()))
leg.set('MMY', str(order.mmy))
leg.set('SecTyp', str(order.sec_typ))
leg.set('CFI', str(order.cfi))
leg.set('Sym', order.sym)
return base_xml
class TIME_IN_FORCE:
DAY = "0"
GTC = "1"
MARKET_ON_CLOSE = "7"
class ACCOUNT_TYPE:
SHORT = "5"
class ORDER_TYPE:
MARKET = "1"
LIMIT = "2"
STOP = "3"
STOP_LIMIT = "4"
class SIDE:
BUY = "1"
SELL = "2"
SELL_SHORT = "5"
class SECURITY_TYPE:
COMMON_STOCK = "CS"
OPTION = "OPT"
MULTI_LEG = "MLEG"
class OPTION_POSITION:
OPEN = "O"
CLOSE = "C"
class OPTION_CLASS:
CALL = "OC"
PUT = "OP"
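# Illustrative sketch (not part of the original module): building a limit order
# for common stock and serializing it to FIXML. The account number is a
# placeholder.
if __name__ == "__main__":
    order = Order(
        acct="12345678",
        sym="AAPL",
        qty=1,
        sec_typ=SECURITY_TYPE.COMMON_STOCK,
        side=SIDE.BUY,
        typ=ORDER_TYPE.LIMIT,
        px=150.00,
        tm_in_force=TIME_IN_FORCE.DAY,
    )
    order.validate()
    print(ElementTree.tostring(order.to_fixml()).decode())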
|
AllyInvestPy
|
/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/ally/responses/order.py
|
order.py
|
class Holding():
def __init__(self):
pass
def from_xml(self, xml):
pass
def from_json(self, json):
# Feeling thankful for Notepad++ macros...
if 'accounttype' in json:
self.accounttype = json['accounttype']
if 'assetclass' in json:
self.assetclass = json['assetclass']
if 'cfi' in json:
self.cfi = json['cfi']
if 'change' in json['quote']:
self.change = json['quote']['change']
if 'costbasis' in json:
self.costbasis = json['costbasis']
if 'cusip' in json['instrument']:
self.cusip = json['instrument']['cusip']
if 'desc' in json['instrument']:
self.desc = json['instrument']['desc']
if 'factor' in json['instrument']:
self.factor = json['instrument']['factor']
if 'gainloss' in json:
self.gainloss = json['gainloss']
if 'lastprice' in json['quote']:
self.lastprice = json['quote']['lastprice']
if 'marketvalue' in json:
self.marketvalue = json['marketvalue']
if 'marketvaluechange' in json:
self.marketvaluechange = json['marketvaluechange']
if 'matdt' in json['instrument']: # simplex options use matdt
self.matdt = json['instrument']['matdt']
if 'mat' in json['instrument']: # multilegs use mat
self.matdt = json['instrument']['mat']
if 'mmy' in json:
self.mmy = json['mmy']
if 'mult' in json:
self.mult = json['mult']
if 'price' in json:
self.price = json['price']
if 'purchaseprice' in json:
self.purchaseprice = json['purchaseprice']
if 'putcall' in json['instrument']:
self.putcall = json['instrument']['putcall']
if 'qty' in json:
self.qty = json['qty']
if 'sectyp' in json['instrument']:
self.sectyp = json['instrument']['sectyp']
if 'strkpx' in json['instrument']:
self.strkpx = json['instrument']['strkpx']
if 'sym' in json['instrument']:
self.sym = json['instrument']['sym']
if 'totalsecurities' in json:
self.totalsecurities = json['totalsecurities']
|
AllyInvestPy
|
/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/ally/responses/holding.py
|
holding.py
|
# Arnaud Legoux Moving Average (ALMA) in Python
This is a small Technical Analysis library for the calculation of Arnaud Legoux Moving Average (ALMA). It is built in Pandas and Numpy and uses [TA](https://github.com/bukosabino/ta).
## Description
The Arnaud Legoux Moving Average (ALMA) is a moving average designed to improve on the Exponential Moving Average (EMA) and Simple Moving Average (SMA). Arnaud Legoux and Dimitrios Kouzis Loukas developed the indicator in 2009. The objective of ALMA is to minimize noise and produce a more reliable signal than conventional moving averages: it removes small price fluctuations and enhances the trend by applying a moving average (MA) twice, once from left to right and once from right to left.
## Key Points on ALMA Indicator
- ALMA works on the same principle as a moving average (MA), but uses a refined weighting formula.
- Its main difference from conventional moving averages is its minimal lag.
- The classic EMA, SMA, SMMA and other moving average lines suffer from a significant drawback: signal lag.
- ALMA fares better in this regard; in a volatile market it shows good trading results even without auxiliary filters.
The Arnaud Legoux Moving Average attempts to bridge this gap and is expected to show both responsiveness and smoothness at the same time. It applies the moving average twice, once from left to right and once from right to left, a process said to largely eliminate the price lag (phase shift) that is common to traditional moving averages; a sketch of the weighting is given below.
Source: Read more on [Stock Maniacs](https://www.stockmaniacs.net/arnaud-legoux-moving-average-indicator/)
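The sketch below shows the weighting that the description above refers to. It is written against plain NumPy/Pandas rather than this package's internals, and the parameter values (window 9, offset 0.85, sigma 6) are the commonly quoted defaults, not necessarily the exact defaults used by `ALMAIndicator`.
```python
import numpy as np
import pandas as pd

def alma_sketch(close: pd.Series, window: int = 9,
                offset: float = 0.85, sigma: float = 6.0) -> pd.Series:
    """Gaussian-weighted moving average with the weight peak shifted
    towards the most recent prices (the idea behind ALMA)."""
    m = offset * (window - 1)   # position of the weight peak in the window
    s = window / sigma          # width of the Gaussian
    i = np.arange(window)
    weights = np.exp(-((i - m) ** 2) / (2 * s ** 2))
    weights /= weights.sum()    # normalize so the weights sum to 1
    # Rolling dot product of the last `window` closing prices with the weights.
    return close.rolling(window).apply(lambda x: np.dot(x, weights), raw=True)
```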
## How to Use
```python
# import the package
from AlmaIndicator import ALMAIndicator
# define a Pandas dataframe which should contain closing price of the stocks
df = ...
# initialize the indicator
# you only need to provide a Pandas series of the closing price of the stock
alma_indicator = ALMAIndicator(close=df['close'])
# add alma to dataframe by calling the alma function
df['alma'] = alma_indicator.alma()
```
## Sample Result

Note that there will be `NaN` values at the beginning of the ALMA series, since the analysis window is set to 9. In other words, your dataset must contain at least 9 entries.
## Sample Figure

|
AlmaIndicator
|
/AlmaIndicator-0.0.4.tar.gz/AlmaIndicator-0.0.4/README.md
|
README.md
|
# practicaUUF4
Example code for using the package:
import self as self
from Almacen import Almacen
from Clases.Bebida import Bebida
class Main:
a = Almacen
bebida = Bebida
bebida = [[1, 1, 'Bezoya', 2, 2, 'Guadarrama', '', ''],
[2, 1, 'Fanta', 1.5, 1.5, '', 'True', 'Free'],
[3, 1, 'Coca-Cola', 1.75, 2, '', 'False', '5%'],
[4, 1, 'Viladrau', 1.5, 1.5, 'Fontalegre', '', ''],
[5, 1, 'Pepsi', 1.85, 1, '', 'True', '6%'],
[6, 2, 'Fontvella', 2.5, 2, 'Girona', '', ''],
[7, 2, 'Bezoya', 2, 1, 'Guadarrama', '', ''],
[8, 2, 'Viladrau', 1.5, 2, 'Fontalegre', '', ''],
[9, 2, 'Trina', 1.75, 1.5, '', 'False', '6%'],
[10, 2, 'Coca-Cola', 1.75, 1.5, '', 'False', 'Free']]
Almacen().afegirBeguda(bebida)
print("¡Bebidas añadidas!")
a.mostrarEstanteria(self)
print("El precio total de todas las bebidas es: " + a.calcularPrecioTotal(self))
print("El precio total de las bebidas de la marca Bezoya es: " + a.calcularPrecioMarca(self, "Fanta"))
print("El precio total de la columna 1: " + a.calcularPrecioColumna(self, 1))
print("Se va a eliminar una bebida")
a.eliminarBebida(self, 3)
a.mostrarEstanteria(self)
print("El precio total de todas las bebidas es: " + a.calcularPrecioTotal(self))
print("El precio total de las bebidas de la marca Bezoya es: " + a.calcularPrecioMarca(self, "Bezoya"))
print("El precio total de la columna 2: " + a.calcularPrecioColumna(self, 2))
|
AlmacenBebidasAlba1
|
/AlmacenBebidasAlba1-1.1.5.tar.gz/AlmacenBebidasAlba1-1.1.5/README.md
|
README.md
|
"""Colored custom logger module by ahmetkkn07 (Python3.6+ Compatible)"""
# =============================================================================
# Imports
# =============================================================================
import os
import datetime
import inspect
class LogLevel:
FATAL = 900
ERROR = 800
WARNING = 700
INFO = 600
DEBUG = 500
TRACE = 400
TEST = 300
ALL = 100
class Term:
BOLD = '\033[1m'
REVERSE = "\033[;7m"
CLEAR = '\033[0m'
RED = '\033[91m'
YELLOW = '\033[93m'
GREEN = '\033[92m'
PURPLE = '\033[95m'
BLUE = '\033[94m'
# unused
CYAN = '\033[96m'
class Alogger:
def __init__(
self,
path="",
log_level=LogLevel.ALL,
log_to_file=True,
log_name=None,
log_file_type="txt"
) -> None:
"""Constructor of Alogger class.
        Args:
            path (str, optional): Directory the log file is written to.
                Defaults to the current directory.
            log_level (LogLevel, optional): Set level to log. Defaults to ALL.
            log_to_file (bool, optional): Set True if you want to save logs
                to file. Defaults to True.
            log_name (str, optional): Custom file name for log file.
                Defaults to caller filename.
            log_file_type (str, optional): Type of file the logs are saved to.
                Defaults to 'txt'; can be set to 'html'.
"""
self.log_level = log_level
self.log_to_file = log_to_file
self.log_file_type = log_file_type
self.caller_filename = f"{inspect.stack()[1].filename.split('.py')[0]}"
if os.name == "nt":
self.caller_filename = self.caller_filename.split("\\")[-1]
elif os.name == "posix":
self.caller_filename = self.caller_filename.split("/")[-1]
if path != "":
self.path = path
else:
self.path = os.curdir
if log_to_file:
if log_name is not None:
self.log_name = log_name
else:
if log_file_type == "html":
self.log_name = f"{self.caller_filename}_log.html"
if os.name == "nt":
self.log_name = self.log_name.split("\\")[-1]
elif os.name == "posix":
self.log_name = self.log_name.split("/")[-1]
elif log_file_type == "txt":
self.log_name = f"{self.caller_filename}.log"
if os.name == "nt":
self.log_name = self.log_name.split("\\")[-1]
elif os.name == "posix":
self.log_name = self.log_name.split("/")[-1]
def fatal(self, *messages) -> None:
if self.log_level <= LogLevel.FATAL:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.REVERSE}{Term.RED}FATAL: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "FATAL")
self._write_to_file(message)
def error(self, *messages) -> None:
if self.log_level <= LogLevel.ERROR:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.RED}{Term.BOLD}ERROR: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "ERROR")
self._write_to_file(message)
def warning(self, *messages) -> None:
if self.log_level <= LogLevel.WARNING:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.YELLOW}{Term.BOLD}WARNING: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "WARNING")
self._write_to_file(message)
def info(self, *messages) -> None:
if self.log_level <= LogLevel.INFO:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.GREEN}{Term.BOLD}INFO: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "INFO")
self._write_to_file(message)
def debug(self, *messages) -> None:
if self.log_level <= LogLevel.DEBUG:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.BLUE}{Term.BOLD}DEBUG: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "DEBUG")
self._write_to_file(message)
def trace(self, *messages) -> None:
if self.log_level <= LogLevel.TRACE:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.PURPLE}{Term.BOLD}TRACE: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "TRACE")
self._write_to_file(message)
def test(self, *messages) -> None:
if self.log_level <= LogLevel.TEST:
caller = inspect.stack()[1] # 0 represents this line
frame = caller[0]
info = inspect.getframeinfo(frame)
caller = f"@{self.caller_filename}.{inspect.stack()[1][3]}:"\
+ f"{info.lineno}"
caller = caller.replace("<module>", "_")
messages = [str(message) for message in messages]
print(
f"{Term.REVERSE}{Term.BOLD}TEST: {' '.join(messages)}. "
+ f"{caller}{Term.CLEAR}")
message = self._create_message(messages, caller, "TEST")
self._write_to_file(message)
def _write_to_file(self, message: str):
if self.log_to_file:
os.chdir(self.path)
with open(self.log_name, "a+") as file:
file.write(f"{message}\n\n")
def _create_message(self, messages, caller, log_type):
now = datetime.datetime.now()
message = ""
if self.log_file_type == "html":
            message = '<div style="background-color:#FF5C57; '\
                + f'color: #282A36;">{now} {log_type}: {" ".join(messages)}. '\
                + f'{caller}</div>'
elif self.log_file_type == "txt":
message = f'{now} {log_type}: {" ".join(messages)}. {caller}'
return message
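# Illustrative sketch (not part of the original module): a logger that only
# prints to the terminal, created and exercised at the DEBUG level.
if __name__ == "__main__":
    log = Alogger(log_level=LogLevel.DEBUG, log_to_file=False)
    log.debug("connection opened")
    log.info("42 rows processed")
    log.warning("retrying request")
    log.error("request failed")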
|
AloggerPy
|
/AloggerPy-1.1.tar.gz/AloggerPy-1.1/src/Alogger/Alogger.py
|
Alogger.py
|
# Alp software: Alp time tools
# Copyright (C) 2010 Niels Serup
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Version:...... 0.1.0
# Maintainer:... Niels Serup <[email protected]>
# Website:...... http://metanohi.org/projects/alp/
# Development:.. http://gitorious.org/Alp
version = (0, 1, 0)
__doc__ = """\
alp
"""
import sys
from datetime import datetime, timedelta
import re
import time as time_module
try:
import curses
_has_curses = True
except ImportError:
_has_curses = False
try:
import termcolor
_has_termcolor = True
colored_orig = termcolor.colored
_normal_esc_seq = colored_orig('>', 'grey').split('>')[1]
def _colored(color, typ):
if color == 'black':
color = 'grey'
if typ == 'bg':
fg_color = None
bg_color = 'on_' + color
elif typ == 'fg':
fg_color = color
bg_color = None
return colored_orig('>', fg_color, bg_color).split('>')[0]
def _formatted(typ):
try:
return colored_orig('>', attrs=[typ]).split('>')[0]
except KeyError:
if typ == 'normal':
return _normal_esc_seq
else:
return ''
except ImportError:
_has_termcolor = False
_colored = lambda color, typ: ''
_formatted = lambda typ: ''
######################################################################
# Basic constants
_epoch = datetime(2010, 10, 15, 12, 00, 00)
_one_alp = 2 ** 18
_hexalp_divide = 2 ** 14
_qvalp_divide = 2 ** 12
_salp_divide = 2 ** 8
_talp_divide = 2 ** 4
class AlpTime(object):
"""The Alp time object"""
seconds_since_epoch=None
seconds=None
alp=None
hexalp=None
qvalp=None
salp=None
talp=None
second=None
def __init__(self):
self.speed = 1
self.set_start_date()
def set_start_date(self, date=None):
"""Set the start date (using Python's datetime module)"""
now = datetime.utcnow()
if date is None:
date = now
self.start_date = date
self.start_diff = self.start_date - _epoch
self.now_diff = self.start_date - now
self.update()
def get_start_date(self):
"""Get the start date"""
return self.start_date
def set_speed(self, speed=1):
"""Set the debug speed"""
self.speed = speed
def get_speed(self):
"""Get the debug speed"""
return self.speed
def get_seconds_since_epoch(self, date=None):
"""Get the number of seconds since epoch"""
if date is None:
date = datetime.utcnow() + self.now_diff
diff = self.start_diff + (date - self.start_date) * self.speed
return diff.days * 86400 + diff.seconds, diff
def update(self, date=None):
"""Update the internal time"""
passed, diff = self.get_seconds_since_epoch(date)
self.date = self.start_date + diff
self.real_date = self.start_date + diff - self.start_diff
seconds_total = passed % _one_alp
alp = passed / _one_alp
seconds_left = seconds_total
hexalp = seconds_left / _hexalp_divide
seconds_left -= hexalp * _hexalp_divide
qvalp = seconds_left / _qvalp_divide
seconds_left -= qvalp * _qvalp_divide
salp = seconds_left / _salp_divide
seconds_left -= salp * _salp_divide
talp = seconds_left / _talp_divide
seconds_left -= talp * _talp_divide
second = seconds_left
self.seconds_since_epoch = passed
self.alp = int(alp)
self.seconds = int(seconds_total)
self.hexalp = int(hexalp)
self.qvalp = int(qvalp)
self.salp = int(salp)
self.talp = int(talp)
self.second = int(second)
def __str__(self):
return 'AlpTime{alp: %d, hexalp: %d, qvalp: %d, \
salp: %d, talp: %d, second: %d}' % \
(self.alp, self.hexalp, self.qvalp,
self.salp, self.talp, self.second)
time = AlpTime()
def update(date=None):
"""Update the internal time"""
time.update(date)
def set_start_date(date=None):
"""Set the start date (using Python's datetime module)"""
time.set_start_date(date)
def set_speed(speed=1):
"""Set the debug speed"""
time.set_speed(speed)
def get_seconds_since_epoch(date=None):
"""Get the number of seconds since epoch"""
return time.get_seconds_since_epoch(date)
def alp_to_datetime(alp, hexalp, qvalp, salp, talp, second):
"""Return a datetime object of the given Alp date"""
date = [alp, hexalp, qvalp, salp, talp, second]
date[0] = int(date[0])
try:
for i in range(len(date) - 1):
date[i + 1] = int(date[i + 1], 16)
except TypeError:
pass
secs = date[0] * _one_alp + date[1] * _hexalp_divide + \
date[2] * _qvalp_divide + date[3] * _salp_divide + \
date[4] * _talp_divide + date[5]
return _epoch + timedelta(seconds=secs)
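# Illustrative example: alp_to_datetime(0, 0, 0, 0, 0, 0) is exactly the
# epoch, datetime(2010, 10, 15, 12, 0, 0), and each whole alp adds
# 2**18 = 262144 seconds (a little over three days) to it.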
######################################################################
# Using curses without initscr
_curses_colors = ('BLUE', 'GREEN', 'CYAN', 'RED', 'MAGENTA', 'YELLOW',
'WHITE', 'BLACK')
_curses_controls = {
'up': 'cuu1', 'down': 'cud1', 'left': 'cub1', 'right': 'cuf1',
'clear_screen': 'clear', 'clear_line': 'el', 'bold': 'bold', 'blink': 'blink', 'dim': 'dim',
'reverse': 'rev', 'underline': 'smul', 'normal': 'sgr0',
'hide_cursor': 'civis', 'show_cursor': 'cnorm'
}
_formatter_control_regex = re.compile(r'!\((.+?)\)')
_formatter_bg_color_regex = re.compile(r'#\((.+?)\)')
_formatter_fg_color_regex = re.compile(r'\$\((.+?)\)')
_formatter_codes_regex = re.compile(r'[!#\$]\(.+?\)')
_formatter_not_all_codes_regex = re.compile(
r'([#\$]\(.+?\)|!\((bold|underline|blink|reverse)\))')
def unformat(text, also_controls=True):
"""Remove formatting codes from text"""
if also_controls:
return _formatter_codes_regex.sub('', text)
else:
return _formatter_not_all_codes_regex.sub('', text)
def _textlen(text):
return len(unformat(text).decode('utf-8'))
class BaseFormatter(object):
"""A generic text formatting generator"""
def _generate_part(self, attr, obj):
return ''
def generate(self, text, put=False):
"""
Generate formatted text according to these codes:
Foreground color codes: $(color) (e.g. $(green))
Background color codes: #(color) (e.g. #(red))
Formatting codes: !(code) (e.g. !(up))
You can use blue, green, cyan, red, magenta, yellow, white and
black as colors.
These are the formatting codes with their actions and curses
equivalents:
code | description | in curses
-------------+----------------------+----------
up | one line up | cuu1
down | one line down | cud1
left | one char left | cub1
right | one char right | cuf1
clear_screen | clear screen | clear
clear_line | clear line | el
bold | bold text | bold
blink | blinking text | blink
dim | dim text | dim
reverse | reverse text | rev
underline | underline text | smul
normal | reset all formatting | sgr0
hide_cursor | hide cursor | civis
show_cursor | show cursor | cnorm
"""
text = _formatter_control_regex.sub(
lambda obj: self._generate_part('controls', obj), text)
text = _formatter_bg_color_regex.sub(
lambda obj: self._generate_part('bg_colors', obj), text)
text = _formatter_fg_color_regex.sub(
lambda obj: self._generate_part('fg_colors', obj), text)
if put:
sys.stdout.write(text)
return text
def clear(self):
"""Clear line"""
self.generate('!(clear_line)', True)
def _end(self):
pass
class _CursesControls(BaseFormatter):
"""
A text formatting generator and a container of curses escape
sequences
"""
def __init__(self):
self.bg_colors = {}
self.fg_colors = {}
self.controls = {}
self.cols = 0
self.lines = 0
if not sys.stdout.isatty():
return
try:
curses.setupterm()
except Exception, e:
return
bg_seq = curses.tigetstr('setab') or curses.tigetstr('setb') or ''
fg_seq = curses.tigetstr('setaf') or curses.tigetstr('setf') or ''
# Get escape sequences
for color in _curses_colors:
index = getattr(curses, 'COLOR_%s' % color)
color = color.lower()
self.bg_colors[color] = curses.tparm(bg_seq, index)
self.fg_colors[color] = curses.tparm(fg_seq, index)
for control in _curses_controls:
self.controls[control] = curses.tigetstr(_curses_controls[control]) or ''
self.cols = curses.tigetnum('cols')
self.lines = curses.tigetnum('lines')
def _generate_part(self, attr, obj):
return self.__getattribute__(attr)[obj.groups(1)[0]]
def _end(self):
print formatter.generate('!(normal)!(show_cursor)!(up)')
class _FakeCursesControls(BaseFormatter):
"""A text formatting generator without curses"""
def _generate_part(self, attr, obj):
obj = obj.groups(1)[0]
if attr == 'bg_colors':
return _colored(obj, 'bg')
elif attr == 'fg_colors':
return _colored(obj, 'fg')
elif attr == 'controls':
return _formatted(obj)
# Else
return ''
def _end(self):
print formatter.generate('!(normal)')
formatter = None
def start_formatter(use_curses=True):
"""
Start the formatter, making the global formatter variable into an
instance of a class related with the BaseFormatter class
"""
global formatter, start_formatter, _using_curses
if not _has_curses or not use_curses:
_using_curses = False
formatter = _FakeCursesControls()
else:
formatter = _CursesControls()
_using_curses = True
start_formatter = lambda *arg: True
return formatter
######################################################################
# Date formats
## a '#' at the end returns the number in hex
## an '_' at the end followed by a number zero-pads
## the value to <number> digits
## Available "units":
### seconds, seconds_since_epoch, alp, hexalp, qvalp, salp, talp, second
_default_hex_date_format = '\
!(bold)#(black)$(yellow)ALP\
#(cyan)$(yellow)&(alp_4)\
#(blue)$(white)/\
#(black)$(green)&(hexalp#)\
$(cyan)&(qvalp)\
$(red)&(salp#)\
&(talp#)\
$(white)&(second#)'
# e.g. 2403/93EC2
_date_format_unit_regex = re.compile(r'&\((.+?)\)')
def _format_replace(obj):
unit = obj.groups(1)[0]
zero_prepend = '_' in unit
as_hex = unit.endswith('#')
if zero_prepend:
spl = unit.split('_')
unit = spl[0]
z_num = spl[1]
elif as_hex:
unit = unit[:-1]
value = time.__dict__[unit]
if zero_prepend:
return ('%0' + z_num + 'd') % value
elif as_hex:
return hex(value)[2:].upper()
else:
return str(value)
def get_date_text(date_format=None):
"""Get the Alp date in date_format"""
if date_format is None:
date_format = _default_hex_date_format
return _date_format_unit_regex.sub(_format_replace, date_format)
######################################################################
# Virtual LEDs creating a clock
_clock_letters = 'abcdefghijklmnopqrs'
_default_clock_layout = '''\
a b c d
e g h i j
f k l m n
o p q r s\
'''
_default_clock_controls = '#(yellow)'
class _Container(object):
pass
class _Lamp(object):
"""A simulation of a simple lamp"""
def __init__(self, **kwds):
self.items = {
True: _Container(), False: _Container()}
for key, val in kwds.iteritems():
if key[-1] in ('0', '1'):
key_vals = bool(int(key[-1])),
key = key[:-1]
else:
key_vals = True, False
for x in key_vals:
self.items[x].__setattr__(key, val)
for x in self.items.values():
d = x.__dict__
if not 'letter' in d:
x.letter = 'o'
if not 'fg' in d:
x.fg = None
if not 'bg' in d:
x.bg = None
if not 'controls' in d:
x.controls = None
def generate(self, state):
info = self.items[state]
text = info.letter
pre_esc = ''
if info.bg:
pre_esc += '#(%s)' % info.bg
if info.fg:
pre_esc += '$(%s)' % info.fg
if info.controls:
for x in info.controls:
pre_esc += '!(' + x + ')'
text = pre_esc + text
if pre_esc:
text += '!(normal)'
return text
_clock_formatting = {
'a': _Lamp(bg='yellow', fg='green', letter0='·', letter1='O', controls=['bold']),
'b': _Lamp(bg='yellow', fg='green', letter0='·', letter1='O', controls=['bold']),
'c': _Lamp(bg='yellow', fg='green', letter0='·', letter1='O', controls=['bold']),
'd': _Lamp(bg='yellow', fg='green', letter0='·', letter1='O', controls=['bold']),
'e': _Lamp(bg='magenta', fg='cyan', letter0='·', letter1='#', controls=['bold']),
'f': _Lamp(bg='magenta', fg='cyan', letter0='·', letter1='#', controls=['bold']),
'g': _Lamp(bg='black', fg='red', letter0='·', letter1='o', controls=['bold']),
'h': _Lamp(bg='black', fg='red', letter0='·', letter1='o', controls=['bold']),
'i': _Lamp(bg='black', fg='red', letter0='·', letter1='o', controls=['bold']),
'j': _Lamp(bg='black', fg='red', letter0='·', letter1='o', controls=['bold']),
'k': _Lamp(bg='blue', fg='red', letter0='·', letter1='o', controls=['bold']),
'l': _Lamp(bg='blue', fg='red', letter0='·', letter1='o', controls=['bold']),
'm': _Lamp(bg='blue', fg='red', letter0='·', letter1='o', controls=['bold']),
'n': _Lamp(bg='blue', fg='red', letter0='·', letter1='o', controls=['bold']),
'o': _Lamp(bg='yellow', fg='white', letter0='·', letter1='>', controls=['bold']),
'p': _Lamp(bg='yellow', fg='white', letter0='·', letter1='>', controls=['bold']),
'q': _Lamp(bg='yellow', fg='white', letter0='·', letter1='>', controls=['bold']),
'r': _Lamp(bg='yellow', fg='white', letter0='·', letter1='>', controls=['bold']),
's': _Lamp(bg='yellow', fg='white', letter0='·', letter1='>', controls=['bold']),
    '*': _Lamp(letter0='.', letter1='o')  # fallback for letters without a defined lamp
}
_clock_states = {}
def _set_states_from_hex(unit_time, *var):
global _clock_states
for i in range(len(var) - 1, -1, -1):
t = unit_time / 2**i
unit_time -= t * 2**i
_clock_states[var[i]] = bool(t)
def _get_states_from_hex(unit_time, w=4):
num = []
for i in range(w - 1, -1, -1):
t = unit_time / 2**i
unit_time -= t * 2**i
num.append(bool(t))
num.reverse()
return num
def update_clock():
"""Update the internal representation of a physical Alp clock"""
global _clock_states
_set_states_from_hex(time.hexalp, 'a', 'b', 'c', 'd')
_set_states_from_hex(time.qvalp, 'e', 'f')
_set_states_from_hex(time.salp, 'g', 'h', 'i', 'j')
_set_states_from_hex(time.talp, 'k', 'l', 'm', 'n')
Q = _get_states_from_hex(time.second)
O = Q[0] or Q[1]
P = Q[2] ^ Q[3]
R = not Q[2] and not Q[3]
val_e = (not Q[0] and Q[1] and P) or (Q[0] and \
((Q[2] and Q[3]) or (not Q[1] and not Q[2] and not Q[3])))
val_d = Q[1] and ((not Q[0] and not P) or (Q[0] \
and ((not Q[2] and Q[3]) or Q[2])))
val_c = (Q[3] and (Q[2] or not O)) or (Q[0] and Q[1] and R)
val_b = (not O and Q[2] and not Q[3]) or (Q[3] and (O or Q[2]))
val_a = Q[3] or (Q[2] and O)
_clock_states['o'] = val_e
_clock_states['p'] = val_d
_clock_states['q'] = val_c
_clock_states['r'] = val_b
_clock_states['s'] = val_a
def _get_clock_formatting(obj):
obj = obj.groups(1)[0]
try:
return _clock_formatting[obj].generate(_clock_states[obj])
except KeyError:
return _clock_formatting['*'].generate(_clock_states[obj])
def get_clock_text(clock_layout=None):
"""Get a representation of a physical Alp clock"""
if clock_layout is None:
clock_layout = _default_clock_layout
clock_layout = re.sub(r'([' + _clock_letters + '])',
lambda obj: _get_clock_formatting(obj) \
+ _default_clock_controls, clock_layout)
text = _default_clock_controls + clock_layout.replace(
'\n', '!(normal)\n' + _default_clock_controls) + '!(normal)'
return text
update_clock()
######################################################################
# Gregorian calendar compatibility
# Format (using strftime)
_default_gregorian_date_format = '\
!(bold)#(red)$(white)GR\
#(yellow) \
!(bold)#(cyan)$(yellow)%Y\
#(blue)$(white)-\
#(cyan)$(yellow)%m\
#(blue)$(white)-\
#(cyan)$(yellow)%d\n\
!(bold)#(red)$(white)EGOR\
#(yellow) \
#(black)$(cyan)%H\
#(blue)$(white):\
#(black)$(green)%M\
#(blue)$(white):\
#(black)$(red)%S'
def get_gregorian_date_text(date_format=None):
"""Get the Gregorian date in date_format"""
if date_format is None:
date_format = _default_gregorian_date_format
return time.real_date.strftime(date_format)
######################################################################
# Convenience functions
def update_all(date=None):
"""Update both internal time and clock representation"""
update(date)
update_clock()
def print_time(date_format=None, greg_date_format=None,
clock_layout=None, date=None, show=None,
formatting=False, continous=None, **kwds):
"""
Print the time in different ways. All arguments can be given as
keyword arguments instead of ordinary arguments.
"""
date_format = date_format or kwds.get('date_format')
greg_date_format = greg_date_format or kwds.get('greg_date_format')
clock_layout = clock_layout or kwds.get('clock_layout')
date = date or kwds.get('date') or datetime.utcnow()
show = show or kwds.get('show') or ['datetime']
use_formatting = formatting
if use_formatting is None:
use_formatting = kwds.get('formatting')
if use_formatting is None:
use_formatting = True
be_continous = continous or kwds.get('continous') or False
def _print_part():
t = ''
for x in show:
if x == 'datetime':
date_text = get_date_text(date_format)
t += '\n\n' + date_text
elif x == 'gregdatetime':
greg_date_text = get_gregorian_date_text(greg_date_format)
t += '\n\n' + greg_date_text
elif x == 'clock':
update_clock()
clock_text = get_clock_text(clock_layout)
t += '\n\n' + clock_text
t = t.strip()
return t + '!(normal)'
start_formatter()
formatter.generate('!(hide_cursor)', True)
update()
prev = time.seconds_since_epoch
if not be_continous:
text = _print_part()
text = text.replace('\n', '!(normal)\n') + '!(normal)'
if not use_formatting:
text = unformat(text, False)
text = formatter.generate(text)
if _using_curses:
print text
else:
print text,
return
go_up = 0
try:
while True:
update()
now = time.seconds_since_epoch
if now > prev:
prev = now
if _using_curses:
text = '!(up)' * go_up + '!(up)\n' + \
_print_part() + '!(up)!(down)'
else:
text = _print_part() + '\n\n'
text = text.replace('\n', '!(normal)\n') + '!(normal)'
go_up = text.count('\n') - 1
if not use_formatting:
text = unformat(text, False)
formatter.generate(text, True)
sleep_time = 0.5 / time.speed
if sleep_time < 0.01:
sleep_time = 0.01
time_module.sleep(sleep_time)
except KeyboardInterrupt:
formatter.generate('\n!(up)' + '!(clear_line)!(up)' * go_up, True)
raise KeyboardInterrupt()
######################################################################
if __name__ == '__main__':
from optparse import OptionParser
class XParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
parser = XParser(
usage='Usage: %prog [options] [date]',
description='Alp time tools',
version='''Alp software %s
Copyright (C) 2010 Niels Serup
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.''' % '.'.join(map(str, version)),
epilog='''
The date format is "GRE:year,month,day,hour,minute,second"
if you specify a date from the Gregorian calendar, or
"ALP:alp,hexalp,qvalp,salp,talp,second" if you specify a date
using the Alp units. If no date is given, it defaults to "now".
Examples:
  Continuously show the Alp time, the Gregorian time, and an Alp clock:
alp -s all -c
  Continuously show only the Alp time:
alp -c
  Show once, then exit:
alp
  Continuously show a clock with no formatting:
alp -c -F -s clock
''')
parser.add_option('-s', '--show', dest='show', metavar='TYPE', action='append',
help='choose which types of displays to show. You \
can choose between "datetime" (or "1"), "clock" (or "2"), and \
"gregdatetime" (or "3"). You can also set it to "all". This setting can \
be specified more than once, but if it isn\'t given at all, only \
"datetime" is shown')
parser.add_option('-c', '--continous', dest='continous',
action='store_true', default=False,
help='Instead of printing the date just once, \
print it again and again until you interrupt it')
parser.add_option('-F', '--no-formatting', dest='formatting',
action='store_false', default=True,
help='don\'t attempt to format strings (i.e. using \
colors and making the text bold)')
parser.add_option('--no-curses', dest='use_curses',
action='store_false', default=True,
help='do not attempt to use the curses library')
parser.add_option('--debug-speed', dest='debug_speed',
metavar='SPEED', type='int',
help='change the speed (default is 1; setting it to \
a higher value makes it go faster).')
options, args = parser.parse_args()
try:
date = args[0].lower().split(':')
typ = date[0]
date = date[1].split(',')
if typ == 'alp':
date = alp_to_datetime(*date)
elif typ == 'gre':
date = datetime(*map(int, date))
except IndexError:
date = datetime.utcnow()
if options.show is None:
options.show = ['datetime']
elif 'all' in options.show:
options.show = ['datetime', 'clock', 'gregdatetime']
else:
for i in range(len(options.show)):
if options.show[i] == '1':
options.show[i] = 'datetime'
elif options.show[i] == '2':
options.show[i] = 'clock'
elif options.show[i] == '3':
options.show[i] = 'gregdatetime'
if options.debug_speed is not None:
set_speed(options.debug_speed)
try:
from setproctitle import setproctitle
setproctitle('alp')
except ImportError:
pass
start_formatter(options.use_curses)
set_start_date(date)
try:
print_time(date=date, show=options.show,
formatting=options.formatting,
continous=options.continous)
except (KeyboardInterrupt, EOFError):
pass
finally:
formatter._end()
|
Alp
|
/Alp-0.1.1.tar.gz/Alp-0.1.1/alp.py
|
alp.py
|
============
Alp software
============
The Alp is a new unit for measuring time. The program included in this
distribution was created to help you understand and use this unit. It is
both a command-line tool, named "alp", and a Python module.
You can learn about the Alp unit and download documentation about it
at http://metanohi.org/projects/alp/
License
=======
Alp software is free software under the terms of the GNU General
Public License version 3 (or any later version). The author of Alp
software is Niels Serup, contactable at [email protected]. This is
version 0.1.1 of the program.
The libraries used by Alp software are GPL-compatible.
Installing
==========
The Alp program and module are contained in a single file,
``alp.py``. Installing it is not strictly necessary, but doing so makes
the Alp software easier to use.
Way #1
------
Just run this (requires that you have python-setuptools installed)::
# easy_install Alp
Way #2
------
Get the newest version of Alp at
http://metanohi.org/projects/alp/ or at
http://pypi.python.org/pypi/Alp
Extract the downloaded file and run this in a terminal::
# python setup.py install
Dependencies
============
Python 2.5+ is probably a requirement.
For formatting and control codes, Alp will attempt to use ncurses. If
that fails, Alp will try to use the Python termcolor module, available
at http://pypi.python.org/pypi/termcolor/, installable using ``$ sudo
easy_install termcolor`` (released under the GPLv3+).
If present, Alp will also use the ``setproctitle`` Python module,
available at http://pypi.python.org/pypi/setproctitle/, installable
using ``$ sudo easy_install setproctitle`` (released under the New BSD
License).
Using
=====
When using the Alp software as a command-line tool, simply run
``alp``. Run ``alp --help`` to see what options you can specify.
When using it as a module, just use ``import alp`` in your Python
program. To learn how the Alp module works, run ``pydoc alp`` or
``python -c 'import alp; help(alp)'``. There are also a couple of
tests in the ``tests`` directory.
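A minimal interactive sketch (``print_time`` and its keyword arguments are
the ones defined in ``alp.py``)::
    >>> import alp
    >>> alp.print_time(show=['datetime', 'clock'])
Passing ``continous=True`` keeps refreshing the display, just like
``alp -c`` on the command line.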
Developing
==========
Alp software is written in Python and uses Git for version control. To get
the latest branch, clone it from gitorious.org like this::
$ git clone git://gitorious.org/Alp/alp.git
This document
=============
Copyright (C) 2010 Niels Serup
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. This file is offered as-is,
without any warranty.
|
Alp
|
/Alp-0.1.1.tar.gz/Alp-0.1.1/README.txt
|
README.txt
|
# Alpha - Mind
<table>
<tr>
<td>Python version</td>
<td><img src="https://img.shields.io/badge/python-3.6-blue.svg"/> </td>
</tr>
  <tr>
<td>Build Status</td>
<td>
<img src="https://travis-ci.org/alpha-miner/alpha-mind.svg" alt="travis build status" />
</td>
</tr>
<tr>
<td>Coverage</td>
<td><img src="https://coveralls.io/repos/github/alpha-miner/alpha-mind/badge.svg?branch=master" alt="coverage" /></td>
</tr>
</table>
**Alpha - Mind** is a multi-factor stock research framework developed in **Python**.
## TODO list
Development of **alpha-mind** is restarting after a long pause. The list below gives the features and improvements currently planned:
- [x] Add support for MySQL as a data backend;
- [ ] Add support for CSV files as a data backend, together with a sample file for users to test with;
- [x] Remove all C++ related code to make alpha-mind easier to install;
- [x] Provide pip-installable packages for the Windows and Linux platforms;
- [ ] Complete documentation;
- [ ] Hyper-parameter tuning for the alpha models;
- [ ] Multi-period forecasting for the alpha models;
- [ ] Multi-period optimization in the optimizer.
## Dependencies
The project has two main external GitHub dependencies:
* [Finance-Python](https://github.com/alpha-miner/finance-python)
* [portfolio - optimizer](https://github.com/alpha-miner/portfolio-optimizer): an optimizer toolkit for portfolio allocation, written by the same author;
Both libraries can be installed directly with pip.
## Features
alpha - mind provides the tool chain commonly used in multi-factor research, including:
* data cleaning
* alpha models
* risk models
* portfolio optimization
* executors
Every module ships with a complete set of test cases to keep it as correct as possible. Performance has also been a first-class concern in all of the numerical models, which build on excellent third-party tools (a short usage sketch follows this list):
* numpy
* numba
* cvxopt
* cvxpy
* pandas
* scipy
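As an illustrative sketch of the data-cleaning part of that tool chain, the helpers re-exported by `alphamind.api` can be chained as follows (the array shapes and random inputs are placeholders):
```python
import numpy as np
from alphamind.api import neutralize, standardize, winsorize_normal
x = np.random.randn(3000, 5)      # raw factor exposures: 3000 stocks, 5 factors
risk = np.random.randn(3000, 10)  # risk-factor exposures used for neutralization
x = winsorize_normal(x, num_stds=3)  # clip outliers beyond 3 standard deviations
x = standardize(x)                   # cross-sectional z-score for each factor column
x = neutralize(risk, x)              # residualize the factors against the risk exposures
```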
## Installation
Installation currently requires cloning or downloading the source code. The steps are:
Clone the project locally
```shell
$ git clone https://github.com/alpha-miner/alpha-mind.git
```
then install it with the following command
```shell
$ python setup.py install
```
### Running with Docker
1. `docker build -t alpha-mind:latest -f Dockerfile .`
2. `docker run -it -p 8080:8080 --name alpha-mind alpha-mind`
#### Tips
The environment variables are configured in `./entrypoint.sh`; they include (an example invocation follows this list):
* `DB_VENDOR`: set this to `rl` if you use MySQL;
* `DB_URI`: the database connection string;
* `FACTOR_TABLES`: the factor tables to use.
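For example, an assumed invocation that overrides those variables on the command line (the connection string and table name are placeholders; whether the overrides take effect depends on how `entrypoint.sh` handles pre-set variables):
```shell
$ docker run -it -p 8080:8080 \
    -e DB_VENDOR=rl \
    -e DB_URI="mysql://user:password@host/alpha" \
    -e FACTOR_TABLES="your_factor_table" \
    --name alpha-mind alpha-mind
```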
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/README.md
|
README.md
|
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import risk_styles
from alphamind.data.engines.sqlengine import industry_styles
from alphamind.data.engines.sqlengine import macro_styles
from alphamind.analysis.factoranalysis import er_portfolio_analysis
from alphamind.analysis.factoranalysis import factor_analysis
from alphamind.analysis.quantileanalysis import er_quantile_analysis
from alphamind.analysis.quantileanalysis import quantile_analysis
from alphamind.data.engines.universe import Universe
from alphamind.data.engines.utilities import industry_list
from alphamind.data.neutralize import neutralize
from alphamind.data.processing import factor_processing
from alphamind.data.rank import percentile
from alphamind.data.rank import rank
from alphamind.data.standardize import Standardizer
from alphamind.data.standardize import projection
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import NormalWinsorizer
from alphamind.data.winsorize import winsorize_normal
from alphamind.execution.naiveexecutor import NaiveExecutor
from alphamind.execution.pipeline import ExecutionPipeline
from alphamind.execution.targetvolexecutor import TargetVolExecutor
from alphamind.execution.thresholdexecutor import ThresholdExecutor
from alphamind.model import ConstLinearModel
from alphamind.model import LassoRegression
from alphamind.model import LinearRegression
from alphamind.model import LogisticRegression
from alphamind.model import NvSVRModel
from alphamind.model import RandomForestClassifier
from alphamind.model import RandomForestRegressor
from alphamind.model import XGBClassifier
from alphamind.model import XGBRegressor
from alphamind.model import XGBTrainer
from alphamind.model import load_model
from alphamind.model.composer import Composer
from alphamind.model.composer import DataMeta
from alphamind.model.composer import predict_by_model
from alphamind.model.composer import train_model
from alphamind.model.data_preparing import fetch_data_package
from alphamind.model.data_preparing import fetch_predict_phase
from alphamind.model.data_preparing import fetch_train_phase
from alphamind.portfolio.constraints import BoundaryDirection
from alphamind.portfolio.constraints import BoundaryType
from alphamind.portfolio.constraints import Constraints
from alphamind.portfolio.constraints import LinearConstraints
from alphamind.portfolio.constraints import create_box_bounds
from alphamind.portfolio.evolver import evolve_positions
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
__all__ = [
'SqlEngine',
'factor_analysis',
'er_portfolio_analysis',
'quantile_analysis',
'er_quantile_analysis',
'Universe',
'factor_processing',
'Constraints',
'LinearConstraints',
'BoundaryType',
'BoundaryDirection',
'create_box_bounds',
'evolve_positions',
'risk_styles',
'industry_styles',
'macro_styles',
'winsorize_normal',
'NormalWinsorizer',
'standardize',
'Standardizer',
'projection',
'neutralize',
'rank',
'percentile',
'industry_list',
'fetch_data_package',
'fetch_train_phase',
'fetch_predict_phase',
'Composer',
'DataMeta',
'train_model',
'predict_by_model',
'LinearRegression',
'LassoRegression',
'ConstLinearModel',
'LogisticRegression',
'RandomForestRegressor',
'RandomForestClassifier',
'XGBRegressor',
'XGBClassifier',
'XGBTrainer',
'NvSVRModel',
'load_model',
'NaiveExecutor',
'ThresholdExecutor',
'TargetVolExecutor',
'ExecutionPipeline',
'alpha_logger',
'map_freq'
]
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/api.py
|
api.py
|
import base64
import math
import pickle
import numba as nb
import numpy as np
from simpleutils import CustomLogger
alpha_logger = CustomLogger('ALPHA_MIND', 'info')
def map_freq(freq):
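    # Map a rebalancing frequency string to the forward-return horizon in
    # trading days, e.g. '1w' -> 4, '5b' -> 4, '1d' -> 0.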
if freq == '1m':
horizon = 21
elif freq == '1w':
horizon = 4
elif freq == '2w':
horizon = 9
elif freq == '3w':
horizon = 14
elif freq == '4w':
horizon = 19
elif freq == '1d':
horizon = 0
elif freq[-1] == "b":
horizon = int(freq[:-1]) - 1
else:
raise ValueError("Unrecognized freq: {0}".format(freq))
return horizon
def groupby(groups):
order = groups.argsort()
t = groups[order]
index_diff = np.where(np.diff(t))[0]
return np.concatenate([index_diff, [len(groups)]]), order
@nb.njit(nogil=True, cache=True)
def set_value(mat, used_level, to_fill):
length, width = used_level.shape
for i in range(length):
for j in range(width):
k = used_level[i, j]
mat[k, j] = to_fill
@nb.njit(nogil=True, cache=True)
def group_mapping(groups: np.ndarray) -> np.ndarray:
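    # Remap arbitrary group labels to consecutive integers 0..n_groups - 1,
    # assigned in ascending order of the original labels.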
length = groups.shape[0]
order = groups.argsort()
res = np.zeros(length, dtype=order.dtype)
start = 0
res[order[0]] = start
previous = groups[order[0]]
for i in range(1, length):
curr_idx = order[i]
curr_val = groups[curr_idx]
if curr_val != previous:
start += 1
res[curr_idx] = start
else:
res[curr_idx] = start
previous = curr_val
return res
@nb.njit(nogil=True, cache=True)
def simple_sum(x, axis=0):
length, width = x.shape
if axis == 0:
res = np.zeros(width)
for i in range(length):
for j in range(width):
res[j] += x[i, j]
elif axis == 1:
res = np.zeros(length)
for i in range(length):
for j in range(width):
res[i] += x[i, j]
else:
raise ValueError("axis value is not supported")
return res
@nb.njit(nogil=True, cache=True)
def simple_abssum(x, axis=0):
length, width = x.shape
if axis == 0:
res = np.zeros(width)
for i in range(length):
for j in range(width):
res[j] += abs(x[i, j])
elif axis == 1:
res = np.zeros(length)
for i in range(length):
for j in range(width):
res[i] += abs(x[i, j])
else:
raise ValueError("axis value is not supported")
return res
@nb.njit(nogil=True, cache=True)
def simple_sqrsum(x, axis=0):
length, width = x.shape
if axis == 0:
res = np.zeros(width)
for i in range(length):
for j in range(width):
res[j] += x[i, j] * x[i, j]
elif axis == 1:
res = np.zeros(length)
for i in range(length):
for j in range(width):
res[i] += x[i, j] * x[i, j]
else:
raise ValueError("axis value is not supported")
res = np.sqrt(res)
return res
@nb.njit(nogil=True, cache=True)
def simple_mean(x, axis=0):
length, width = x.shape
if axis == 0:
res = np.zeros(width)
for j in range(width):
for i in range(length):
res[j] += x[i, j]
res[j] /= length
elif axis == 1:
res = np.zeros(length)
for i in range(length):
for j in range(width):
res[i] += x[i, j]
res[i] /= width
else:
raise ValueError("axis value is not supported")
return res
@nb.njit(nogil=True, cache=True)
def simple_std(x, axis=0, ddof=1):
length, width = x.shape
if axis == 0:
res = np.zeros(width)
sum_mat = np.zeros(width)
for j in range(width):
for i in range(length):
res[j] += x[i, j] * x[i, j]
sum_mat[j] += x[i, j]
res[j] = math.sqrt((res[j] - sum_mat[j] * sum_mat[j] / length) / (length - ddof))
elif axis == 1:
res = np.zeros(length)
sum_mat = np.zeros(width)
for i in range(length):
for j in range(width):
res[i] += x[i, j] * x[i, j]
sum_mat[i] += x[i, j]
res[i] = math.sqrt((res[i] - sum_mat[i] * sum_mat[i] / width) / (width - ddof))
else:
raise ValueError("axis value is not supported")
return res
@nb.njit(nogil=True, cache=True)
def agg_sum(groups, x):
max_g = groups.max()
length, width = x.shape
res = np.zeros((max_g + 1, width), dtype=np.float64)
for i in range(length):
for j in range(width):
res[groups[i], j] += x[i, j]
return res
@nb.njit(nogil=True, cache=True)
def agg_sqrsum(groups, x):
max_g = groups.max()
length, width = x.shape
res = np.zeros((max_g + 1, width), dtype=np.float64)
for i in range(length):
for j in range(width):
res[groups[i], j] += x[i, j] * x[i, j]
res = np.sqrt(res)
return res
@nb.njit(nogil=True, cache=True)
def agg_abssum(groups, x):
max_g = groups.max()
length, width = x.shape
res = np.zeros((max_g + 1, width), dtype=np.float64)
for i in range(length):
for j in range(width):
res[groups[i], j] += abs(x[i, j])
return res
@nb.njit(nogil=True, cache=True)
def agg_mean(groups, x):
max_g = groups.max()
length, width = x.shape
res = np.zeros((max_g + 1, width), dtype=np.float64)
bin_count = np.zeros(max_g + 1, dtype=np.int32)
for i in range(length):
for j in range(width):
res[groups[i], j] += x[i, j]
bin_count[groups[i]] += 1
for i in range(max_g + 1):
curr = bin_count[i]
for j in range(width):
res[i, j] /= curr
return res
@nb.njit(nogil=True, cache=True)
def agg_std(groups, x, ddof=1):
max_g = groups.max()
length, width = x.shape
res = np.zeros((max_g + 1, width), dtype=np.float64)
sumsq = np.zeros((max_g + 1, width), dtype=np.float64)
bin_count = np.zeros(max_g + 1, dtype=np.int32)
for i in range(length):
for j in range(width):
res[groups[i], j] += x[i, j]
sumsq[groups[i], j] += x[i, j] * x[i, j]
bin_count[groups[i]] += 1
for i in range(max_g + 1):
curr = bin_count[i]
for j in range(width):
res[i, j] = math.sqrt((sumsq[i, j] - res[i, j] * res[i, j] / curr) / (curr - ddof))
return res
@nb.njit(nogil=True, cache=True)
def copy_value(groups, source):
length = groups.shape[0]
width = source.shape[1]
destination = np.zeros((length, width))
for i in range(length):
k = groups[i]
for j in range(width):
destination[i, j] = source[k, j]
return destination
@nb.njit(nogil=True, cache=True)
def scale_value(groups, source, x, scale):
length, width = x.shape
destination = x.copy()
for i in range(length):
k = groups[i]
for j in range(width):
destination[i, j] /= source[k, j] / scale
return destination
@nb.njit(nogil=True, cache=True)
def array_index(array, items):
to_look_length = items.shape[0]
arr_length = array.shape[0]
res = np.zeros(to_look_length, dtype=array.dtype)
for i in range(to_look_length):
for j in range(arr_length):
if items[i] == array[j]:
res[i] = j
break
return res
def transform(groups: np.ndarray,
x: np.ndarray,
func: str,
ddof: int = 1,
scale: float = 1.) -> np.ndarray:
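    # Broadcast a per-group aggregate (mean/std/sum/abssum/sqrsum) back onto
    # every row of its group; the 'scale' and 'project' variants instead divide
    # each row by its group's abssum or sqrsum and multiply it by `scale`.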
if func == 'mean':
value_data = agg_mean(groups, x)
elif func == 'std':
value_data = agg_std(groups, x, ddof=ddof)
elif func == 'sum':
value_data = agg_sum(groups, x)
elif func == 'abssum' or func == 'scale':
value_data = agg_abssum(groups, x)
elif func == 'sqrsum' or func == 'project':
value_data = agg_sqrsum(groups, x)
else:
        raise ValueError('({0}) is not recognized as a valid functor'.format(func))
if func == 'scale' or func == 'project':
return scale_value(groups, value_data, x, scale)
else:
return copy_value(groups, value_data)
def aggregate(groups, x, func, ddof=1):
if func == 'mean':
value_data = agg_mean(groups, x)
elif func == 'std':
value_data = agg_std(groups, x, ddof=ddof)
elif func == 'sum':
value_data = agg_sum(groups, x)
elif func == 'abssum' or func == 'scale':
value_data = agg_abssum(groups, x)
elif func == 'sqrsum' or func == 'project':
value_data = agg_sqrsum(groups, x)
else:
        raise ValueError('({0}) is not recognized as a valid functor'.format(func))
return value_data
def encode(obj: object) -> str:
encoded = base64.encodebytes(pickle.dumps(obj))
return encoded.decode('ascii')
def decode(str_repr: str):
encoded = str_repr.encode('ascii')
return pickle.loads(base64.decodebytes(encoded))
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/utilities.py
|
utilities.py
|
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from alphamind.data.processing import factor_processing
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.portfolio.constraints import Constraints
from alphamind.portfolio.constraints import LinearConstraints
from alphamind.portfolio.linearbuilder import linear_builder
from alphamind.portfolio.longshortbulder import long_short_builder
from alphamind.portfolio.meanvariancebuilder import mean_variance_builder
from alphamind.portfolio.meanvariancebuilder import target_vol_builder
from alphamind.portfolio.rankbuilder import rank_build
from alphamind.settlement.simplesettle import simple_settle
def factor_analysis(factors: pd.DataFrame,
factor_weights: np.ndarray,
industry: np.ndarray,
d1returns: np.ndarray = None,
detail_analysis=True,
benchmark: Optional[np.ndarray] = None,
risk_exp: Optional[np.ndarray] = None,
is_tradable: Optional[np.ndarray] = None,
constraints: Optional[Constraints] = None,
method='risk_neutral',
**kwargs) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
if 'pre_process' in kwargs:
pre_process = kwargs['pre_process']
del kwargs['pre_process']
else:
pre_process = [winsorize_normal, standardize]
if 'post_process' in kwargs:
post_process = kwargs['post_process']
del kwargs['post_process']
else:
post_process = [winsorize_normal, standardize]
er = factor_processing(factors.values, pre_process, risk_exp, post_process) @ factor_weights
return er_portfolio_analysis(er,
industry,
d1returns,
constraints,
detail_analysis,
benchmark,
is_tradable,
method,
**kwargs)
def er_portfolio_analysis(er: np.ndarray,
industry: np.ndarray,
dx_return: np.ndarray,
constraints: Optional[Union[LinearConstraints, Constraints]] = None,
detail_analysis=True,
benchmark: Optional[np.ndarray] = None,
is_tradable: Optional[np.ndarray] = None,
method='risk_neutral',
**kwargs) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
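    # Turn expected returns into target weights with the chosen method:
    # 'risk_neutral' (linear programming), 'rank', 'ls'/'long_short',
    # 'mv'/'mean_variance' or 'tv'/'target_vol', optionally followed by a
    # simple settlement analysis of the resulting portfolio.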
er = er.flatten()
def create_constraints(benchmark, **kwargs):
if 'lbound' in kwargs:
lbound = kwargs['lbound'].copy()
del kwargs['lbound']
else:
lbound = np.maximum(0., benchmark - 0.01)
if 'ubound' in kwargs:
ubound = kwargs['ubound'].copy()
del kwargs['ubound']
else:
ubound = 0.01 + benchmark
if is_tradable is not None:
ubound[~is_tradable] = np.minimum(lbound, ubound)[~is_tradable]
risk_lbound, risk_ubound = constraints.risk_targets()
cons_exp = constraints.risk_exp
return lbound, ubound, cons_exp, risk_lbound, risk_ubound
if method == 'risk_neutral':
lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(benchmark, **kwargs)
turn_over_target = kwargs.get('turn_over_target')
current_position = kwargs.get('current_position')
status, _, weights = linear_builder(er,
risk_constraints=cons_exp,
lbound=lbound,
ubound=ubound,
risk_target=(risk_lbound, risk_ubound),
turn_over_target=turn_over_target,
current_position=current_position)
if status not in ("optimal", "optimal_inaccurate"):
raise ValueError('linear programming optimizer in status: {0}'.format(status))
elif method == 'rank':
weights = rank_build(er, use_rank=kwargs['use_rank'],
masks=is_tradable).flatten() * benchmark.sum() / kwargs[
'use_rank']
elif method == 'ls' or method == 'long_short':
weights = long_short_builder(er).flatten()
elif method == 'mv' or method == 'mean_variance':
lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(benchmark, **kwargs)
risk_model = kwargs['risk_model']
if 'lam' in kwargs:
lam = kwargs['lam']
else:
lam = 1.
status, _, weights = mean_variance_builder(er,
risk_model=risk_model,
bm=benchmark,
lbound=lbound,
ubound=ubound,
risk_exposure=cons_exp,
risk_target=(risk_lbound, risk_ubound),
lam=lam)
if status != 'optimal':
raise ValueError('mean variance optimizer in status: {0}'.format(status))
elif method == 'tv' or method == 'target_vol':
lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(benchmark, **kwargs)
risk_model = kwargs['risk_model']
if 'target_vol' in kwargs:
target_vol = kwargs['target_vol']
else:
target_vol = 1.
status, _, weights = target_vol_builder(er,
risk_model=risk_model,
bm=benchmark,
lbound=lbound,
ubound=ubound,
risk_exposure=cons_exp,
risk_target=(risk_lbound, risk_ubound),
vol_target=target_vol)
else:
raise ValueError("Unknown building type ({0})".format(method))
if detail_analysis:
analysis = simple_settle(weights, dx_return, industry, benchmark)
else:
analysis = None
return pd.DataFrame({'weight': weights,
'industry': industry,
'er': er}), \
analysis
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/analysis/factoranalysis.py
|
factoranalysis.py
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
from alphamind.data.processing import factor_processing
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.utilities import alpha_logger
def cs_impl(ref_date,
factor_data,
factor_name,
risk_exposure,
constraint_risk,
industry_matrix,
dx_returns):
total_data = pd.merge(factor_data, risk_exposure, on='code')
total_data = pd.merge(total_data, industry_matrix, on='code')
total_data = total_data.replace([np.inf, -np.inf], np.nan).dropna()
if len(total_data) < 0.33 * len(factor_data):
alpha_logger.warning(f"valid data point({len(total_data)}) "
f"is less than 33% of the total sample ({len(factor_data)}). Omit this run")
return np.nan, np.nan, np.nan
total_risk_exp = total_data[constraint_risk]
er = total_data[[factor_name]].values.astype(float)
er = factor_processing(er, [winsorize_normal, standardize], total_risk_exp.values,
[standardize]).flatten()
industry = total_data.industry_name.values
codes = total_data.code.tolist()
target_pos = pd.DataFrame({'code': codes,
'weight': er,
'industry': industry})
target_pos['weight'] = target_pos['weight'] / target_pos['weight'].abs().sum()
target_pos = pd.merge(target_pos, dx_returns, on=['code'])
target_pos = pd.merge(target_pos, total_data[['code'] + constraint_risk], on=['code'])
total_risk_exp = target_pos[constraint_risk]
activate_weight = target_pos['weight'].values
excess_return = np.exp(target_pos[['dx']].values) - 1.
excess_return = factor_processing(excess_return,
[winsorize_normal, standardize],
total_risk_exp.values,
[winsorize_normal, standardize]).flatten()
port_ret = np.log(activate_weight @ excess_return + 1.)
ic = np.corrcoef(excess_return, activate_weight)[0, 1]
x = sm.add_constant(activate_weight)
results = sm.OLS(excess_return, x).fit()
t_stats = results.tvalues[1]
alpha_logger.info(f"{ref_date} is finished with {len(target_pos)} stocks for {factor_name}")
alpha_logger.info(f"{ref_date} risk_exposure: "
f"{np.sum(np.square(target_pos.weight.values @ target_pos[constraint_risk].values))}")
return port_ret, ic, t_stats
def cross_section_analysis(ref_date,
factor_name,
universe,
horizon,
constraint_risk,
engine):
codes = engine.fetch_codes(ref_date, universe)
risk_exposure = engine.fetch_risk_model(ref_date, codes)[1][['code'] + constraint_risk]
factor_data = engine.fetch_factor(ref_date, factor_name, codes)
industry_matrix = engine.fetch_industry_matrix(ref_date, codes, 'sw_adj', 1)
dx_returns = engine.fetch_dx_return(ref_date, codes, horizon=horizon, offset=1)
return cs_impl(ref_date, factor_data, factor_name, risk_exposure, constraint_risk,
industry_matrix, dx_returns)
if __name__ == '__main__':
from alphamind.api import SqlEngine, Universe, risk_styles, industry_styles
factor_name = 'SIZE'
data_source = 'postgres+psycopg2://postgres:[email protected]/alpha'
engine = SqlEngine(data_source)
risk_names = list(set(risk_styles).difference({factor_name}))
industry_names = list(set(industry_styles).difference({factor_name}))
constraint_risk = risk_names + industry_names
universe = Universe('custom', ['ashare_ex'])
horizon = 9
x = cross_section_analysis('2018-02-08',
factor_name,
universe,
horizon,
constraint_risk,
engine=engine)
print(x)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/analysis/crosssetctions.py
|
crosssetctions.py
|
from typing import Optional
import numpy as np
import pandas as pd
from alphamind.data.processing import factor_processing
from alphamind.data.quantile import quantile
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.utilities import agg_mean
def quantile_analysis(factors: pd.DataFrame,
factor_weights: np.ndarray,
dx_return: np.ndarray,
n_bins: int = 5,
risk_exp: Optional[np.ndarray] = None,
**kwargs):
if 'pre_process' in kwargs:
pre_process = kwargs['pre_process']
del kwargs['pre_process']
else:
pre_process = [winsorize_normal, standardize]
if 'post_process' in kwargs:
post_process = kwargs['post_process']
del kwargs['post_process']
else:
post_process = [standardize]
er = factor_processing(factors.values, pre_process, risk_exp, post_process) @ factor_weights
return er_quantile_analysis(er, n_bins, dx_return, **kwargs)
def er_quantile_analysis(er: np.ndarray,
n_bins: int,
dx_return: np.ndarray,
de_trend=False) -> np.ndarray:
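    # Mean forward return per expected-return quantile; with de_trend=True each
    # quantile's return is reduced by the average return of the other quantiles.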
er = er.flatten()
q_groups = quantile(er, n_bins)
if dx_return.ndim < 2:
dx_return.shape = -1, 1
group_return = agg_mean(q_groups, dx_return).flatten()
total_return = group_return.sum()
ret = group_return.copy()
if de_trend:
resid = n_bins - 1
res_weight = 1. / resid
for i, value in enumerate(ret):
ret[i] = (1. + res_weight) * value - res_weight * total_return
return ret
if __name__ == '__main__':
n = 5000
n_f = 5
n_bins = 5
x = np.random.randn(n, 5)
risk_exp = np.random.randn(n, 3)
x_w = np.random.randn(n_f)
r = np.random.randn(n)
f_df = pd.DataFrame(x)
calculated = quantile_analysis(f_df,
x_w,
r,
risk_exp=None,
n_bins=n_bins,
pre_process=[], # [winsorize_normal, standardize],
post_process=[]) # [standardize])
er = x_w @ f_df.values.T
expected = er_quantile_analysis(er, n_bins, r)
print(calculated)
print(expected)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/analysis/quantileanalysis.py
|
quantileanalysis.py
|
import copy
import numpy as np
import pandas as pd
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.analysis.factoranalysis import er_portfolio_analysis
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import industry_styles
from alphamind.data.engines.sqlengine import macro_styles
from alphamind.data.engines.sqlengine import risk_styles
from alphamind.data.processing import factor_processing
from alphamind.exceptions.exceptions import PortfolioBuilderException
from alphamind.execution.naiveexecutor import NaiveExecutor
from alphamind.model.composer import train_model
from alphamind.portfolio.constraints import BoundaryType
from alphamind.portfolio.constraints import LinearConstraints
from alphamind.portfolio.constraints import create_box_bounds
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
all_styles = risk_styles + industry_styles + macro_styles
class RunningSetting(object):
def __init__(self,
lbound=None,
ubound=None,
weights_bandwidth=None,
rebalance_method='risk_neutral',
bounds=None,
**kwargs):
self.lbound = lbound
self.ubound = ubound
self.weights_bandwidth = weights_bandwidth
self.executor = NaiveExecutor()
self.rebalance_method = rebalance_method
self.bounds = bounds
self.more_opts = kwargs
class Strategy(object):
def __init__(self,
alpha_model,
data_meta,
universe,
start_date,
end_date,
freq,
benchmark=905,
industry_cat='sw_adj',
industry_level=1,
dask_client=None):
self.alpha_model = alpha_model
self.data_meta = data_meta
self.universe = universe
self.benchmark = benchmark
self.dates = makeSchedule(start_date, end_date, freq, 'china.sse')
self.dates = [d.strftime('%Y-%m-%d') for d in self.dates]
self.industry_cat = industry_cat
self.industry_level = industry_level
self.freq = freq
self.horizon = map_freq(freq)
self.engine = SqlEngine(self.data_meta.data_source)
self.dask_client = dask_client
self.total_data = None
self.index_return = None
self.risk_models = None
self.alpha_models = None
def prepare_backtest_data(self):
total_factors = self.engine.fetch_factor_range(self.universe,
self.alpha_model.formulas,
dates=self.dates)
alpha_logger.info("alpha factor data loading finished ...")
total_industry = self.engine.fetch_industry_matrix_range(self.universe,
dates=self.dates,
category=self.industry_cat,
level=self.industry_level)
alpha_logger.info("industry data loading finished ...")
total_benchmark = self.engine.fetch_benchmark_range(dates=self.dates,
benchmark=self.benchmark)
alpha_logger.info("benchmark data loading finished ...")
self.risk_models, _, total_risk_exp = self.engine.fetch_risk_model_range(
self.universe,
dates=self.dates,
risk_model=self.data_meta.risk_model,
model_type='factor'
)
alpha_logger.info("risk_model data loading finished ...")
total_returns = self.engine.fetch_dx_return_range(self.universe,
dates=self.dates,
horizon=self.horizon,
offset=1)
alpha_logger.info("returns data loading finished ...")
total_data = pd.merge(total_factors, total_industry, on=['trade_date', 'code'])
total_data = pd.merge(total_data, total_benchmark, on=['trade_date', 'code'], how='left')
total_data.fillna({'weight': 0.}, inplace=True)
total_data = pd.merge(total_data, total_returns, on=['trade_date', 'code'])
total_data = pd.merge(total_data, total_risk_exp, on=['trade_date', 'code'])
is_in_benchmark = (total_data.weight > 0.).astype(float).values.reshape((-1, 1))
total_data.loc[:, 'benchmark'] = is_in_benchmark
total_data.loc[:, 'total'] = np.ones_like(is_in_benchmark)
total_data.sort_values(['trade_date', 'code'], inplace=True)
self.index_return = self.engine.fetch_dx_return_index_range(self.benchmark,
dates=self.dates,
horizon=self.horizon,
offset=1).set_index(
'trade_date')
self.total_data = total_data
def prepare_backtest_models(self):
if self.total_data is None:
self.prepare_backtest_data()
total_data_groups = self.total_data.groupby('trade_date')
if self.dask_client is None:
models = {}
for ref_date, _ in total_data_groups:
models[ref_date], _, _ = train_model(ref_date.strftime('%Y-%m-%d'),
self.alpha_model, self.data_meta)
else:
def worker(parameters):
new_model, _, _ = train_model(parameters[0].strftime('%Y-%m-%d'), parameters[1],
parameters[2])
return parameters[0], new_model
l = self.dask_client.map(worker, [(d[0], self.alpha_model, self.data_meta) for d in
total_data_groups])
results = self.dask_client.gather(l)
models = dict(results)
self.alpha_models = models
alpha_logger.info("alpha models training finished ...")
@staticmethod
def _create_lu_bounds(running_setting, codes, benchmark_w):
codes = np.array(codes)
if running_setting.weights_bandwidth:
lbound = np.maximum(0., benchmark_w - running_setting.weights_bandwidth)
ubound = running_setting.weights_bandwidth + benchmark_w
lb = running_setting.lbound
ub = running_setting.ubound
if lb or ub:
if not isinstance(lb, dict):
lbound = np.ones_like(benchmark_w) * lb
else:
lbound = np.zeros_like(benchmark_w)
for c in lb:
lbound[codes == c] = lb[c]
if 'other' in lb:
for i, c in enumerate(codes):
if c not in lb:
lbound[i] = lb['other']
if not isinstance(ub, dict):
ubound = np.ones_like(benchmark_w) * ub
else:
ubound = np.ones_like(benchmark_w)
for c in ub:
ubound[codes == c] = ub[c]
if 'other' in ub:
for i, c in enumerate(codes):
if c not in ub:
ubound[i] = ub['other']
return lbound, ubound
def run(self, running_setting):
alpha_logger.info("starting backting ...")
total_data_groups = self.total_data.groupby('trade_date')
rets = []
turn_overs = []
leverags = []
previous_pos = pd.DataFrame()
executor = copy.deepcopy(running_setting.executor)
positions = pd.DataFrame()
if self.alpha_models is None:
self.prepare_backtest_models()
for ref_date, this_data in total_data_groups:
risk_model = self.risk_models[ref_date]
new_model = self.alpha_models[ref_date]
codes = this_data.code.values.tolist()
if previous_pos.empty:
current_position = None
else:
previous_pos.set_index('code', inplace=True)
remained_pos = previous_pos.reindex(codes)
remained_pos.fillna(0., inplace=True)
current_position = remained_pos.weight.values
benchmark_w = this_data.weight.values
constraints = LinearConstraints(running_setting.bounds,
this_data,
benchmark_w)
lbound, ubound = self._create_lu_bounds(running_setting, codes, benchmark_w)
this_data.fillna(0, inplace=True)
new_factors = factor_processing(this_data[new_model.features].values,
pre_process=self.data_meta.pre_process,
risk_factors=this_data[
self.data_meta.neutralized_risk].values.astype(
float) if self.data_meta.neutralized_risk else None,
post_process=self.data_meta.post_process)
new_factors = pd.DataFrame(new_factors, columns=new_model.features, index=codes)
er = new_model.predict(new_factors).astype(float)
alpha_logger.info('{0} re-balance: {1} codes'.format(ref_date, len(er)))
target_pos = self._calculate_pos(running_setting,
er,
this_data,
constraints,
benchmark_w,
lbound,
ubound,
risk_model=risk_model.get_risk_profile(codes),
current_position=current_position)
target_pos['code'] = codes
target_pos['trade_date'] = ref_date
turn_over, executed_pos = executor.execute(target_pos=target_pos)
leverage = executed_pos.weight.abs().sum()
ret = executed_pos.weight.values @ (np.exp(this_data.dx.values) - 1.)
rets.append(np.log(1. + ret))
executor.set_current(executed_pos)
turn_overs.append(turn_over)
leverags.append(leverage)
positions = positions.append(executed_pos)
previous_pos = executed_pos
positions['benchmark_weight'] = self.total_data['weight'].values
positions['dx'] = self.total_data.dx.values
trade_dates = positions.trade_date.unique()
ret_df = pd.DataFrame({'returns': rets, 'turn_over': turn_overs, 'leverage': leverags},
index=trade_dates)
ret_df['benchmark_returns'] = self.index_return['dx']
ret_df.loc[advanceDateByCalendar('china.sse', ret_df.index[-1], self.freq)] = 0.
ret_df = ret_df.shift(1)
ret_df.iloc[0] = 0.
ret_df['excess_return'] = ret_df['returns'] - ret_df['benchmark_returns'] * ret_df[
'leverage']
return ret_df, positions
def _calculate_pos(self, running_setting, er, data, constraints, benchmark_w, lbound, ubound,
risk_model,
current_position):
more_opts = running_setting.more_opts
try:
target_pos, _ = er_portfolio_analysis(er,
industry=data.industry_name.values,
dx_return=None,
constraints=constraints,
detail_analysis=False,
benchmark=benchmark_w,
method=running_setting.rebalance_method,
lbound=lbound,
ubound=ubound,
current_position=current_position,
target_vol=more_opts.get('target_vol'),
risk_model=risk_model,
turn_over_target=more_opts.get(
'turn_over_target'))
except PortfolioBuilderException:
alpha_logger.warning("Not able to fit the constraints. Using full re-balance.")
target_pos, _ = er_portfolio_analysis(er,
industry=data.industry_name.values,
dx_return=None,
constraints=constraints,
detail_analysis=False,
benchmark=benchmark_w,
method=running_setting.rebalance_method,
lbound=lbound,
ubound=ubound,
target_vol=more_opts.get('target_vol'),
risk_model=risk_model)
return target_pos
if __name__ == '__main__':
import os
from matplotlib import pyplot as plt
from PyFin.api import CSQuantiles
from PyFin.api import LAST
from alphamind.api import Universe
from alphamind.api import ConstLinearModel
from alphamind.api import DataMeta
from alphamind.api import industry_list
from matplotlib.pylab import mpl
plt.style.use('seaborn-whitegrid')
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
"""
Back test parameter settings
"""
benchmark_code = 905
universe = Universe('zz800') + Universe('cyb')
start_date = '2011-01-01'
end_date = '2011-05-04'
freq = '10b'
neutralized_risk = None
alpha_factors = {
'ep_q_cs': CSQuantiles(LAST('ep_q'), groups='sw1_adj')
}
weights = dict(ep_q_cs=1.)
alpha_model = ConstLinearModel(features=alpha_factors, weights=weights)
data_meta = DataMeta(freq=freq,
universe=universe,
batch=1,
neutralized_risk=None,
pre_process=None,
post_process=None,
data_source=os.environ['DB_URI'])
strategy = Strategy(alpha_model,
data_meta,
universe=universe,
start_date=start_date,
end_date=end_date,
freq=freq,
benchmark=benchmark_code)
strategy.prepare_backtest_data()
def create_scenario(weights_bandwidth=0.02, target_vol=0.01, method='risk_neutral'):
industry_names = industry_list('sw_adj', 1)
constraint_risk = ['EARNYILD', 'LIQUIDTY', 'GROWTH', 'SIZE', 'BETA', 'MOMENTUM']
total_risk_names = constraint_risk + industry_names + ['benchmark', 'total']
b_type = []
l_val = []
u_val = []
for name in total_risk_names:
if name == 'benchmark':
b_type.append(BoundaryType.RELATIVE)
l_val.append(0.8)
u_val.append(1.001)
elif name == 'total':
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.001)
u_val.append(.001)
elif name == 'EARNYILD':
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.001)
u_val.append(0.60)
elif name == 'GROWTH':
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.20)
u_val.append(0.20)
elif name == 'MOMENTUM':
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.10)
u_val.append(0.20)
elif name == 'SIZE':
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.20)
u_val.append(0.20)
elif name == 'LIQUIDTY':
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.25)
u_val.append(0.25)
else:
b_type.append(BoundaryType.ABSOLUTE)
l_val.append(-0.01)
u_val.append(0.01)
bounds = create_box_bounds(total_risk_names, b_type, l_val, u_val)
running_setting = RunningSetting(weights_bandwidth=weights_bandwidth,
rebalance_method=method,
bounds=bounds,
target_vol=target_vol,
turn_over_target=0.4)
ret_df, positions = strategy.run(running_setting)
return ret_df
create_scenario(0.01, target_vol=0.01, method='tv')
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/strategy/strategy.py
|
strategy.py
|
import numba as nb
import numpy as np
from alphamind.utilities import aggregate
from alphamind.utilities import array_index
from alphamind.utilities import group_mapping
from alphamind.utilities import simple_mean
from alphamind.utilities import simple_std
from alphamind.utilities import transform
@nb.njit(nogil=True, cache=True)
def mask_values_2d(x: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3) -> np.ndarray:
res = x.copy()
length, width = x.shape
for i in range(length):
for j in range(width):
ubound = mean_values[i, j] + num_stds * std_values[i, j]
lbound = mean_values[i, j] - num_stds * std_values[i, j]
if x[i, j] > ubound:
res[i, j] = ubound
elif x[i, j] < lbound:
res[i, j] = lbound
return res
@nb.njit(nogil=True, cache=True)
def interp_values_2d(x: np.ndarray,
groups: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3,
interval: float = 0.5) -> np.ndarray:
res = x.copy()
length, width = x.shape
max_cat = np.max(groups)
for k in range(max_cat + 1):
target_idx = np.where(groups == k)[0].flatten()
for j in range(width):
target_x = x[target_idx, j]
target_res = target_x.copy()
mean = mean_values[target_idx[0], j]
std = std_values[target_idx[0], j]
ubound = mean + num_stds * std
lbound = mean - num_stds * std
# upper bound abnormal values
idx = target_x > ubound
n = np.sum(idx)
if n > 0:
u_values = target_res[idx]
q_values = u_values.argsort().argsort()
target_res[idx] = ubound + q_values / n * interval * std
# lower bound abnormal values
idx = target_x < lbound
n = np.sum(idx)
if n > 0:
l_values = target_res[idx]
q_values = (-l_values).argsort().argsort()
target_res[idx] = lbound - q_values / n * interval * std
res[target_idx, j] = target_res
return res
@nb.njit(nogil=True, cache=True)
def mask_values_1d(x: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3) -> np.ndarray:
res = x.copy()
length, width = x.shape
for j in range(width):
ubound = mean_values[j] + num_stds * std_values[j]
lbound = mean_values[j] - num_stds * std_values[j]
res[x[:, j] > ubound, j] = ubound
res[x[:, j] < lbound, j] = lbound
return res
@nb.njit(nogil=True, cache=True)
def interp_values_1d(x: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3,
interval: float = 0.5) -> np.ndarray:
res = x.copy()
length, width = x.shape
for j in range(width):
ubound = mean_values[j] + num_stds * std_values[j]
lbound = mean_values[j] - num_stds * std_values[j]
# upper bound abnormal values
idx = x[:, j] > ubound
n = np.sum(idx)
if n > 0:
u_values = res[idx, j]
q_values = u_values.argsort().argsort()
res[idx, j] = ubound + q_values / n * interval * std_values[j]
# lower bound abnormal values
idx = x[:, j] < lbound
n = np.sum(idx)
if n > 0:
l_values = res[idx, j]
q_values = (-l_values).argsort().argsort()
res[idx, j] = lbound - q_values / n * interval * std_values[j]
return res
def winsorize_normal(x: np.ndarray, num_stds: int = 3, ddof=1,
groups: np.ndarray = None,
method: str = 'flat',
interval: float = 0.5) -> np.ndarray:
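    # Clip values lying more than num_stds standard deviations from the (group)
    # mean. method='flat' snaps outliers onto the boundary; any other method
    # re-spreads them just outside it, in rank order, within an interval * std
    # band.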
if groups is not None:
groups = group_mapping(groups)
mean_values = transform(groups, x, 'mean')
std_values = transform(groups, x, 'std', ddof)
if method == 'flat':
res = mask_values_2d(x, mean_values, std_values, num_stds)
else:
res = interp_values_2d(x, groups, mean_values, std_values, num_stds, interval)
else:
std_values = simple_std(x, axis=0, ddof=ddof)
mean_values = simple_mean(x, axis=0)
if method == 'flat':
res = mask_values_1d(x, mean_values, std_values, num_stds)
else:
res = interp_values_1d(x, mean_values, std_values, num_stds, interval)
return res
class NormalWinsorizer(object):
def __init__(self, num_stds: int = 3,
ddof: int =1,
method: str = 'flat',
interval: float = 0.5):
self.num_stds = num_stds
self.ddof = ddof
self.mean = None
self.std = None
self.labels = None
self.method = method
self.interval = interval
def fit(self, x: np.ndarray, groups: np.ndarray = None):
if groups is not None:
group_index = group_mapping(groups)
self.mean = aggregate(group_index, x, 'mean')
self.std = aggregate(group_index, x, 'std', self.ddof)
self.labels = np.unique(groups)
else:
self.mean = simple_mean(x, axis=0)
self.std = simple_std(x, axis=0, ddof=self.ddof)
def transform(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
if groups is not None:
index = array_index(self.labels, groups)
if self.method == 'flat':
res = mask_values_2d(x, self.mean[index], self.std[index], self.num_stds)
else:
res = interp_values_2d(x, groups,
self.mean[index],
self.std[index],
self.num_stds,
self.interval)
else:
if self.method == 'flat':
res = mask_values_1d(x, self.mean, self.std, self.num_stds)
else:
res = interp_values_1d(x, self.mean, self.std, self.num_stds, self.interval)
return res
def __call__(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
return winsorize_normal(x, self.num_stds, self.ddof, groups, self.method, self.interval)
if __name__ == '__main__':
x = np.random.randn(10000, 1)
groups = np.random.randint(0, 3, 10000)
import datetime as dt
start = dt.datetime.now()
for i in range(1000):
winsorize_normal(x, method='flat')
print(dt.datetime.now() - start)
start = dt.datetime.now()
for i in range(1000):
winsorize_normal(x, method='interp')
print(dt.datetime.now() - start)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/winsorize.py
|
winsorize.py
|
import copy
import pandas as pd
from simpleutils.asserts import require
from PyFin.Analysis.SecurityValueHolders import SecurityValueHolder
from PyFin.api import transform as transform_impl
def factor_translator(factor_pool):
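    # Normalize a factor specification (string, value holder, dict or list)
    # into a {name: expression} mapping plus the sorted list of the underlying
    # field dependencies.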
if not factor_pool:
return None, None
if isinstance(factor_pool, str):
return {factor_pool: factor_pool}, [factor_pool]
elif isinstance(factor_pool, SecurityValueHolder):
return {str(factor_pool): factor_pool}, sorted(factor_pool.fields)
elif isinstance(factor_pool, dict):
dependency = set()
for k, v in factor_pool.items():
require(isinstance(k, str), ValueError,
'factor_name {0} should be string.'.format(k))
require(isinstance(v, SecurityValueHolder) or isinstance(v, str),
ValueError,
                    'expression {0} should be a value holder or a string.'.format(v))
if isinstance(v, str):
dependency = dependency.union([v])
else:
dependency = dependency.union(v.fields)
return factor_pool, sorted(dependency)
elif isinstance(factor_pool, list):
factor_dict = {}
dependency = set()
k = 1
for i, f in enumerate(factor_pool):
if isinstance(f, str):
factor_dict[f] = f
dependency = dependency.union([f])
elif isinstance(f, SecurityValueHolder):
factor_dict[str(f)] = f
dependency = dependency.union(f.fields)
k += 1
return factor_dict, sorted(dependency)
else:
raise ValueError('{0} is not in valid format as factors'.format(factor_pool))
class Transformer:
def __init__(self,
expressions):
expression_dict, expression_dependency = \
factor_translator(copy.deepcopy(expressions))
if expression_dict:
self.names = sorted(expression_dict.keys())
self.expressions = [expression_dict[n] for n in self.names]
self.dependency = expression_dependency
else:
self.names = []
self.expressions = []
self.dependency = []
def transform(self, group_name, data):
if len(data) > 0:
transformed_data = transform_impl(data,
self.expressions,
self.names,
group_name,
dropna=False)
return transformed_data
else:
return pd.DataFrame()
if __name__ == '__main__':
transformer = Transformer(['c', 'a'])
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/transformer.py
|
transformer.py
|
from typing import Dict
from typing import Tuple
from typing import Union
import numba as nb
import numpy as np
import alphamind.utilities as utils
def neutralize(x: np.ndarray,
y: np.ndarray,
groups: np.ndarray = None,
detail: bool = False,
weights: np.ndarray = None) \
-> Union[np.ndarray, Tuple[np.ndarray, Dict]]:
if y.ndim == 1:
y = y.reshape((-1, 1))
if weights is None:
weights = np.ones(len(y), dtype=float)
output_dict = {}
if detail:
exposure = np.zeros(x.shape + (y.shape[1],))
explained = np.zeros(x.shape + (y.shape[1],))
output_dict['exposure'] = exposure
output_dict['explained'] = explained
if groups is not None:
res = np.zeros(y.shape)
index_diff, order = utils.groupby(groups)
start = 0
if detail:
for diff_loc in index_diff:
curr_idx = order[start:diff_loc + 1]
curr_x, b = _sub_step(x, y, weights, curr_idx, res)
exposure[curr_idx, :, :] = b
explained[curr_idx] = ls_explain(curr_x, b)
start = diff_loc + 1
else:
for diff_loc in index_diff:
curr_idx = order[start:diff_loc + 1]
_sub_step(x, y, weights, curr_idx, res)
start = diff_loc + 1
else:
try:
b = ls_fit(x, y, weights)
except np.linalg.linalg.LinAlgError:
b = ls_fit_pinv(x, y, weights)
res = ls_res(x, y, b)
if detail:
explained[:, :, :] = ls_explain(x, b)
exposure[:] = b
if output_dict:
return res, output_dict
else:
return res
def _sub_step(x, y, w, curr_idx, res) -> Tuple[np.ndarray, np.ndarray]:
curr_x, curr_y, curr_w = x[curr_idx], y[curr_idx], w[curr_idx]
try:
b = ls_fit(curr_x, curr_y, curr_w)
except np.linalg.linalg.LinAlgError:
b = ls_fit_pinv(curr_x, curr_y, curr_w)
res[curr_idx] = ls_res(curr_x, curr_y, b)
return curr_x, b
@nb.njit(nogil=True, cache=True)
def ls_fit(x: np.ndarray, y: np.ndarray, w: np.ndarray) -> np.ndarray:
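    # (comment added) Weighted least squares via the normal equations:
    # solve (X^T W X) b = X^T W y with W = diag(w); x.T * w scales each
    # observation's column of x.T by its weight.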
x_bar = x.T * w
b = np.linalg.solve(x_bar @ x, x_bar @ y)
return b
@nb.njit(nogil=True, cache=True)
def ls_fit_pinv(x: np.ndarray, y: np.ndarray, w: np.ndarray) -> np.ndarray:
x_bar = x.T * w
b = np.linalg.pinv(x_bar @ x) @ x_bar @ y
return b
@nb.njit(nogil=True, cache=True)
def ls_res(x: np.ndarray, y: np.ndarray, b: np.ndarray) -> np.ndarray:
return y - x @ b
@nb.njit(nogil=True, cache=True)
def ls_explain(x: np.ndarray, b: np.ndarray) -> np.ndarray:
m, n = b.shape
return b.reshape((1, m, n)) * x.reshape((-1, m, 1))
if __name__ == '__main__':
x = np.random.randn(50000, 10)
y = np.random.randn(50000, 1)
w = np.ones(50000)
import datetime as dt
start = dt.datetime.now()
for _ in range(1000):
ls_fit(x, y, w)
print(dt.datetime.now() - start)
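    # Illustrative check (not in the original benchmark): residuals from neutralize()
    # are orthogonal to the regressors, so the projection below is numerically ~0.
    res = neutralize(x, y, weights=w)
    print(np.abs(x.T @ res).max())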
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/neutralize.py
|
neutralize.py
|
import numpy as np
from alphamind.utilities import aggregate
from alphamind.utilities import array_index
from alphamind.utilities import group_mapping
from alphamind.utilities import simple_mean
from alphamind.utilities import simple_sqrsum
from alphamind.utilities import simple_std
from alphamind.utilities import transform
def standardize(x: np.ndarray, groups: np.ndarray = None, ddof=1) -> np.ndarray:
if groups is not None:
groups = group_mapping(groups)
mean_values = transform(groups, x, 'mean')
std_values = transform(groups, x, 'std', ddof)
return (x - mean_values) / np.maximum(std_values, 1e-8)
else:
return (x - simple_mean(x, axis=0)) / np.maximum(simple_std(x, axis=0, ddof=ddof), 1e-8)
def projection(x: np.ndarray, groups: np.ndarray = None, axis=1) -> np.ndarray:
if groups is not None and axis == 0:
groups = group_mapping(groups)
projected = transform(groups, x, 'project')
return projected
else:
return x / simple_sqrsum(x, axis=axis).reshape((-1, 1))
class Standardizer(object):
def __init__(self, ddof: int = 1):
self.ddof = ddof
self.mean = None
self.std = None
self.labels = None
def fit(self, x: np.ndarray, groups: np.ndarray = None):
if groups is not None:
group_index = group_mapping(groups)
self.mean = aggregate(group_index, x, 'mean')
self.std = aggregate(group_index, x, 'std', self.ddof)
self.labels = np.unique(groups)
else:
self.mean = simple_mean(x, axis=0)
self.std = simple_std(x, axis=0, ddof=self.ddof)
def transform(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
if groups is not None:
index = array_index(self.labels, groups)
return (x - self.mean[index]) / np.maximum(self.std[index], 1e-8)
else:
return (x - self.mean) / np.maximum(self.std, 1e-8)
def __call__(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
return standardize(x, groups, self.ddof)
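if __name__ == '__main__':
    # Minimal sketch (not in the original file), assuming simple_mean / simple_std
    # behave like np.mean / np.std: cross-sectional standardization leaves each
    # column with ~zero mean and unit standard deviation, and Standardizer.fit /
    # transform reproduce the one-shot standardize() result on the same sample.
    sample = np.random.randn(1000, 3) * 5. + 2.
    normed = standardize(sample)
    print(normed.mean(axis=0), normed.std(axis=0, ddof=1))
    scaler = Standardizer(ddof=1)
    scaler.fit(sample)
    print(np.allclose(scaler.transform(sample), normed))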
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/standardize.py
|
standardize.py
|
import abc
import sys
import pandas as pd
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from sqlalchemy import select
from alphamind.data.dbmodel.models import Universe as UniverseTable
class BaseUniverse(metaclass=abc.ABCMeta):
@abc.abstractmethod
def condition(self):
pass
def __add__(self, rhs):
return OrUniverse(self, rhs)
def __sub__(self, rhs):
return XorUniverse(self, rhs)
def __and__(self, rhs):
return AndUniverse(self, rhs)
def __or__(self, rhs):
return OrUniverse(self, rhs)
def isin(self, rhs):
return AndUniverse(self, rhs)
@abc.abstractmethod
def save(self):
pass
@classmethod
def load(cls, u_desc: dict):
pass
def query(self, engine, start_date: str = None, end_date: str = None, dates=None):
if hasattr(UniverseTable, "flag"):
more_conditions = [UniverseTable.flag == 1]
else:
more_conditions = []
query = select([UniverseTable.trade_date, UniverseTable.code.label("code")]).where(
and_(
self._query_statements(start_date, end_date, dates),
*more_conditions
)
).order_by(UniverseTable.trade_date, UniverseTable.code)
df = pd.read_sql(query, engine.engine)
df["trade_date"] = pd.to_datetime(df["trade_date"])
return df
def _query_statements(self, start_date: str = None, end_date: str = None, dates=None):
return and_(
self.condition(),
UniverseTable.trade_date.in_(dates) if dates else UniverseTable.trade_date.between(
start_date, end_date)
)
class Universe(BaseUniverse):
def __init__(self, u_name: str):
self.u_name = u_name.lower()
def condition(self):
return getattr(UniverseTable, self.u_name) == 1
def save(self):
return dict(
u_type=self.__class__.__name__,
u_name=self.u_name
)
@classmethod
def load(cls, u_desc: dict):
return cls(u_name=u_desc['u_name'])
def __eq__(self, other):
return self.u_name == other.u_name
class OrUniverse(BaseUniverse):
def __init__(self, lhs: BaseUniverse, rhs: BaseUniverse):
self.lhs = lhs
self.rhs = rhs
def condition(self):
return or_(self.lhs.condition(), self.rhs.condition())
def save(self):
return dict(
u_type=self.__class__.__name__,
lhs=self.lhs.save(),
rhs=self.rhs.save()
)
@classmethod
def load(cls, u_desc: dict):
lhs = u_desc['lhs']
rhs = u_desc['rhs']
return cls(
lhs=getattr(sys.modules[__name__], lhs['u_type']).load(lhs),
rhs=getattr(sys.modules[__name__], rhs['u_type']).load(rhs),
)
def __eq__(self, other):
return self.lhs == other.lhs and self.rhs == other.rhs and isinstance(other, OrUniverse)
class AndUniverse(BaseUniverse):
def __init__(self, lhs: BaseUniverse, rhs: BaseUniverse):
self.lhs = lhs
self.rhs = rhs
def condition(self):
return and_(self.lhs.condition(), self.rhs.condition())
def save(self):
return dict(
u_type=self.__class__.__name__,
lhs=self.lhs.save(),
rhs=self.rhs.save()
)
@classmethod
def load(cls, u_desc: dict):
lhs = u_desc['lhs']
rhs = u_desc['rhs']
return cls(
lhs=getattr(sys.modules[__name__], lhs['u_type']).load(lhs),
rhs=getattr(sys.modules[__name__], rhs['u_type']).load(rhs),
)
def __eq__(self, other):
return self.lhs == other.lhs and self.rhs == other.rhs and isinstance(other, AndUniverse)
class XorUniverse(BaseUniverse):
def __init__(self, lhs: BaseUniverse, rhs: BaseUniverse):
self.lhs = lhs
self.rhs = rhs
def condition(self):
return and_(self.lhs.condition(), not_(self.rhs.condition()))
def save(self):
return dict(
u_type=self.__class__.__name__,
lhs=self.lhs.save(),
rhs=self.rhs.save()
)
@classmethod
def load(cls, u_desc: dict):
lhs = u_desc['lhs']
rhs = u_desc['rhs']
return cls(
lhs=getattr(sys.modules[__name__], lhs['u_type']).load(lhs),
rhs=getattr(sys.modules[__name__], rhs['u_type']).load(rhs),
)
def __eq__(self, other):
return self.lhs == other.lhs and self.rhs == other.rhs and isinstance(other, XorUniverse)
def load_universe(u_desc: dict):
u_name = u_desc['u_type']
if u_name == 'Universe':
return Universe.load(u_desc)
elif u_name == 'OrUniverse':
return OrUniverse.load(u_desc)
elif u_name == 'AndUniverse':
return AndUniverse.load(u_desc)
elif u_name == 'XorUniverse':
return XorUniverse.load(u_desc)
if __name__ == '__main__':
from alphamind.data.engines.sqlengine import SqlEngine
engine = SqlEngine()
    # Universe takes a single universe name; extra filters are composed with the
    # set-style operators defined above.
    universe = Universe('zz800')
print(universe.query(engine,
start_date='2018-04-26',
end_date='2018-04-26'))
print(universe.query(engine,
dates=['2017-12-21', '2017-12-25']))
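    # Illustrative addition (not in the original demo; the universe names below are
    # placeholders): universes compose with set-style operators and survive a
    # save() / load_universe() round trip.
    composed = (Universe('hs300') + Universe('zz500')) - Universe('zz1000')
    print(load_universe(composed.save()) == composed)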
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/engines/universe.py
|
universe.py
|
import os
from typing import Dict
from typing import Iterable
from alphamind.data.dbmodel.models import Market
from alphamind.data.dbmodel.models import RiskCovDay
from alphamind.data.dbmodel.models import RiskCovLong
from alphamind.data.dbmodel.models import RiskCovShort
from alphamind.data.dbmodel.models import RiskExposure
from alphamind.data.dbmodel.models import SpecificRiskDay
from alphamind.data.dbmodel.models import SpecificRiskLong
from alphamind.data.dbmodel.models import SpecificRiskShort
from alphamind.data.engines.industries import INDUSTRY_MAPPING
def _map_risk_model_table(risk_model: str) -> tuple:
if risk_model == 'day':
return RiskCovDay, SpecificRiskDay
elif risk_model == 'short':
return RiskCovShort, SpecificRiskShort
elif risk_model == 'long':
return RiskCovLong, SpecificRiskLong
else:
raise ValueError("risk model name {0} is not recognized".format(risk_model))
def _map_factors(factors: Iterable[str], used_factor_tables) -> Dict:
factor_cols = {}
factors = set(factors).difference({'trade_date', 'code', 'isOpen'})
to_keep = factors.copy()
for f in factors:
for t in used_factor_tables:
if f in t.columns:
factor_cols[t.columns[f].name] = t
to_keep.remove(f)
break
if to_keep:
raise ValueError("factors in <{0}> can't be find".format(to_keep))
return factor_cols
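# Illustrative note (not part of the original file): given e.g. {'roe', 'ep'},
# _map_factors returns {'roe': <table defining roe>, 'ep': <table defining ep>},
# picking the first supplied table that exposes a matching column and raising if
# any requested factor cannot be found.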
if "DB_VENDOR" in os.environ and os.environ["DB_VENDOR"].lower() == "mysql":
def _map_industry_category(category: str) -> str:
if category == 'sw':
return '申万行业分类(2014)'
elif category == 'zz':
return '中证行业分类'
elif category == 'zx':
return '中信标普行业分类'
elif category == 'zjh':
return '证监会行业分类(2012)-证监会'
else:
raise ValueError("No other industry is supported at the current time")
else:
def _map_industry_category(category: str) -> str:
if category == 'sw':
return '申万行业分类'
elif category == 'sw_adj':
return '申万行业分类修订'
elif category == 'zz':
return '中证行业分类'
elif category == 'dx':
return '东兴行业分类'
elif category == 'zjh':
return '证监会行业V2012'
else:
raise ValueError("No other industry is supported at the current time")
def industry_list(category: str, level: int = 1) -> list:
return INDUSTRY_MAPPING[category][level]
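if __name__ == '__main__':
    # Minimal sketch (not in the original file): the risk-model horizon maps to a
    # (covariance table, specific-risk table) pair, and industry_list is a plain
    # lookup into INDUSTRY_MAPPING.
    print(_map_risk_model_table('short'))
    print(len(industry_list('sw', 1)))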
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/engines/utilities.py
|
utilities.py
|
INDUSTRY_MAPPING = {
'sw': {
1: ["采掘", "传媒", "电气设备", "电子", "房地产", "纺织服装", "非银金融", "钢铁", "公用事业", "国防军工", "化工", "机械设备",
"计算机", "家用电器", "建筑材料",
"建筑装饰", "交通运输", "农林牧渔", "汽车", "轻工制造", "商业贸易", "食品饮料", "通信", "休闲服务", "医药生物", "银行",
"有色金属", "综合"],
2: ["白色家电", "半导体", "包装印刷", "保险", "玻璃制造", "采掘服务", "餐饮", "畜禽养殖", "船舶制造", "地面兵装", "电机", "电力",
"电气自动化设备", "电源设备",
"电子制造", "动物保健", "多元金融", "房地产开发", "房屋建设", "纺织制造", "服装家纺", "钢铁", "港口", "高低压设备", "高速公路",
"工业金属", "公交", "光学光电子",
"航空运输", "航空装备", "航天装备", "航运", "互联网传媒", "化学纤维", "化学原料", "化学制品", "化学制药", "环保工程及服务", "黄金",
"机场", "基础建设",
"计算机设备", "计算机应用", "家用轻工", "金属非金属新材料", "金属制品", "景点", "酒店", "林业", "旅游综合", "贸易", "煤炭开采",
"农产品加工", "农业综合",
"其他采掘", "其他电子", "其他建材", "其他交运设备", "其他轻工制造", "其他休闲服务", "汽车服务", "汽车零部件", "汽车整车", "燃气",
"商业物业经营", "生物制品",
"石油化工", "石油开采", "食品加工", "视听器材", "水泥制造", "水务", "饲料", "塑料", "铁路运输", "通信设备", "通信运营",
"通用机械", "文化传媒", "物流",
"稀有金属", "橡胶", "一般零售", "医疗服务", "医疗器械", "医药商业", "仪器仪表", "银行", "饮料制造", "营销传播", "渔业", "元件",
"园林工程", "园区开发",
"运输设备", "造纸", "证券", "中药", "种植业", "专业工程", "专业零售", "专用设备", "装修装饰", "综合"],
3: ["IT服务", "LED", "氨纶", "白酒", "百货", "半导体材料", "包装印刷", "保险", "被动元件", "冰箱", "玻璃制造", "玻纤",
"彩电", "餐饮", "超市",
"城轨建设", "乘用车", "储能设备", "畜禽养殖", "船舶制造", "纯碱", "磁性材料", "氮肥", "低压设备", "涤纶", "地面兵装", "电机",
"电网自动化", "电子零部件制造",
"电子系统组装", "动物保健", "多业态零售", "多元金融", "房地产开发", "房屋建设", "纺织服装设备", "纺织化学用品", "非金属新材料",
"分立器件", "风电设备", "氟化工及制冷剂",
"辅料", "复合肥", "改性塑料", "钢结构", "港口", "高速公路", "高压设备", "工程机械", "工控自动化", "公交", "管材", "光伏设备",
"光学元件", "国际工程承包",
"果蔬加工", "海洋捕捞", "航空运输", "航空装备", "航天装备", "航运", "合成革", "互联网信息服务", "化学工程", "化学原料药", "化学制剂",
"环保工程及服务", "环保设备",
"黄金", "黄酒", "火电", "火电设备", "机场", "机床工具", "机械基础件", "集成电路", "计量仪表", "计算机设备", "家电零部件", "家纺",
"家具", "钾肥", "焦炭加工",
"金属新材料", "金属制品", "酒店", "聚氨酯", "空调", "锂", "粮食种植", "粮油加工", "林业", "磷肥", "磷化工及磷酸盐", "楼宇设备",
"路桥施工", "轮胎",
"旅游综合", "铝", "氯碱", "毛纺", "贸易", "煤炭开采", "棉纺", "民爆用品", "磨具磨料", "耐火材料", "男装", "内燃机", "农药",
"农业综合", "农用机械",
"女装", "啤酒", "平面媒体", "葡萄酒", "普钢", "其他采掘", "其他采掘服务", "其他电子", "其他纺织", "其他服装", "其他互联网服务",
"其他化学原料", "其他化学制品",
"其他基础建设", "其他家用轻工", "其他建材", "其他交运设备", "其他酒类", "其他农产品加工", "其他轻工制造", "其他塑料制品", "其他文化传媒",
"其他稀有小金属", "其他纤维",
"其他橡胶制品", "其他休闲服务", "其他种植业", "其他专业工程", "其它电源设备", "其它视听器材", "其它通用机械", "其它专用机械", "汽车服务",
"汽车零部件", "铅锌",
"燃机发电", "燃气", "热电", "人工景点", "日用化学产品", "肉制品", "乳品", "软件开发", "软饮料", "商用载货车", "商用载客车",
"生物制品", "石油加工", "石油开采",
"石油贸易", "食品综合", "水产养殖", "水电", "水利工程", "水泥制造", "水务", "丝绸", "饲料", "炭黑", "特钢", "调味发酵品",
"铁路建设", "铁路设备", "铁路运输",
"通信传输设备", "通信配套服务", "通信运营", "铜", "涂料油漆油墨制造", "维纶", "文娱用品", "钨", "无机盐", "物流", "稀土",
"洗衣机", "显示器件", "线缆部件及其他",
"小家电", "鞋帽", "新能源发电", "休闲服装", "冶金矿采化工设备", "一般物业经营", "医疗服务", "医疗器械", "医药商业", "仪器仪表",
"移动互联网服务", "银行", "印染",
"印刷包装机械", "印制电路板", "营销服务", "影视动漫", "油气钻采服务", "有线电视网络", "园林工程", "园区开发", "造纸", "粘胶", "证券",
"制冷空调设备", "中压设备",
"中药", "终端设备", "种子生产", "重型机械", "珠宝首饰", "专业连锁", "专业市场", "装修装饰", "自然景点", "综合", "综合电力设备商"]
},
'sw_adj': {
1: ["建筑材料", "机械设备", "家用电器", "交通运输", "化工", "纺织服装", "电气设备", "多元金融", "通信", "传媒", "信息服务", "银行",
"农林牧渔", "建筑装饰",
"计算机", "轻工制造", "交运设备", "信息设备", "钢铁", "采掘", "建筑建材", "商业贸易", "房地产", "有色金属", "国防军工",
"医药生物", "汽车", "公用事业",
"保险", "休闲服务", "证券", "电子", "综合", "食品饮料"]
},
'zz': {
1: ["电信业务", "工业", "公用事业", "金融地产", "可选消费", "能源", "信息技术", "医药卫生", "原材料", "主要消费"],
2: ["半导体", "保险", "传媒", "电信服务", "房地产", "公用事业", "计算机及电子设备", "计算机运用", "家庭与个人用品", "交通运输", "零售业",
"耐用消费品与服装", "能源",
"其他金融", "汽车与汽车零部件", "商业服务与用品", "食品、饮料与烟草", "食品与主要用品零售", "通信设备", "消费者服务", "医疗器械与服务",
"医药生物", "银行", "原材料",
"资本品", "资本市场"],
3: ["半导体", "包装食品与肉类", "保险", "传媒", "道路运输", "电力", "电脑与外围设备", "电气设备", "电网", "电信运营服务", "电信增值服务",
"电子设备", "多元化零售",
"房地产管理与服务", "房地产开发与园区", "纺织服装", "非金属采矿及制品", "钢铁", "个人用品", "工业集团企业", "供热或其他公用事业", "航空公司",
"航空航天与国防",
"航空货运与物流", "航运", "互联网服务", "互联网零售", "化学原料", "化学制品", "环保设备、工程与服务", "机械制造", "家常用品",
"家庭耐用消费品", "建筑材料", "建筑产品",
"建筑与工程", "交通基本设施", "酒店、餐馆与休闲", "煤炭", "能源开采设备与服务", "农牧渔产品", "其他金融服务", "其他零售", "汽车零配件与轮胎",
"汽车与摩托车", "燃气",
"日用品经销商", "容器与包装", "软件开发", "商业服务与用品", "商业银行", "生物科技", "石油与天然气", "食品与主要用品零售", "水务",
"通信设备", "消费信贷", "信息技术服务",
"休闲设备与用品", "医疗器械", "医疗用品与服务提供商", "饮料", "有色金属", "纸类与林业产品", "制药", "制药与生物科技服务", "珠宝与奢侈品",
"资本市场", "综合消费者服务"]
},
'zjh': {
1: ["采矿业", "电力、热力、燃气及水生产和供应业", "房地产业", "建筑业", "交通运输、仓储和邮政业", "教育", "金融业", "居民服务、修理和其他服务业",
"科学研究和技术服务业",
"农、林、牧、渔业", "批发和零售业", "水利、环境和公共设施管理业", "卫生和社会工作", "文化、体育和娱乐业", "信息传输、软件和信息技术服务业", "制造业",
"住宿和餐饮业", "综合",
"租赁和商务服务业"],
2: ["保险业", "餐饮业", "仓储业", "畜牧业", "道路运输业", "电力、热力生产和供应业", "电气机械和器材制造业", "电信、广播电视和卫星传输服务",
"房地产业", "房屋建筑业",
"纺织服装、服饰业", "纺织业", "非金属矿采选业", "非金属矿物制品业", "废弃资源综合利用业", "公共设施管理业", "广播、电视、电影和影视录音制作业",
"航空运输业", "黑色金属矿采选业",
"黑色金属冶炼和压延加工业", "互联网和相关服务", "化学纤维制造业", "化学原料和化学制品制造业", "货币金融服务", "机动车、电子产品和日用产品修理业",
"计算机、通信和其他电子设备制造业",
"家具制造业", "建筑安装业", "建筑装饰和其他建筑业", "教育", "金属制品业", "酒、饮料和精制茶制造业", "开采辅助活动", "林业", "零售业",
"煤炭开采和洗选业",
"木材加工和木、竹、藤、棕、草制品业", "农、林、牧、渔服务业", "农副食品加工业", "农业", "批发业", "皮革、毛皮、羽毛及其制品和制鞋业", "其他金融业",
"其他制造业", "汽车制造业",
"燃气生产和供应业", "软件和信息技术服务业", "商务服务业", "生态保护和环境治理业", "石油和天然气开采业", "石油加工、炼焦和核燃料加工业", "食品制造业",
"水的生产和供应业",
"水利管理业", "水上运输业", "体育", "铁路、船舶、航空航天和其它运输设备制造业", "铁路运输业", "通用设备制造业", "土木工程建筑业", "卫生",
"文化艺术业",
"文教、工美、体育和娱乐用品制造业", "橡胶和塑料制品业", "新闻和出版业", "研究和试验发展", "医药制造业", "仪器仪表制造业", "印刷和记录媒介复制业",
"邮政业", "有色金属矿采选业",
"有色金属冶炼和压延加工业", "渔业", "造纸和纸制品业", "住宿业", "专业技术服务业", "专用设备制造业", "装卸搬运和运输代理业", "资本市场服务",
"综合", "租赁业"],
},
'dx': {
1: ["Cyclical", "Defensive", "Sensitive"],
2: ["ConsumerDiscretionary", "ConsumerStaples", "Financials", "HealthCare", "Industrials",
"IT", "Materials",
"RealEstate", "Utilities"]
}
}
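if __name__ == '__main__':
    # Illustrative check (not in the original file): every category maps level
    # numbers to plain lists of industry names, so a lookup is two dict indexes.
    for category, levels in INDUSTRY_MAPPING.items():
        print(category, {level: len(names) for level, names in levels.items()})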
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/engines/industries.py
|
industries.py
|
import os
from typing import Dict
from typing import Iterable
from typing import List
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy import select, and_, outerjoin, join, column, Table
from sqlalchemy.sql import func
from PyFin.api import advanceDateByCalendar
from alphamind.data.dbmodel.models.postgres import metadata
from alphamind.data.dbmodel.models.postgres import FactorMaster
from alphamind.data.dbmodel.models.postgres import IndexComponent
from alphamind.data.dbmodel.models.postgres import IndexMarket
from alphamind.data.dbmodel.models.postgres import Industry
from alphamind.data.dbmodel.models.postgres import Market
from alphamind.data.dbmodel.models.postgres import RiskExposure
from alphamind.data.dbmodel.models.postgres import RiskMaster
from alphamind.data.dbmodel.models.postgres import Universe as UniverseTable
from alphamind.data.engines.universe import Universe
from alphamind.data.engines.utilities import _map_factors
from alphamind.data.engines.utilities import _map_industry_category
from alphamind.data.engines.utilities import _map_risk_model_table
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.portfolio.riskmodel import FactorRiskModel
risk_styles = ['BETA',
'MOMENTUM',
'SIZE',
'EARNYILD',
'RESVOL',
'GROWTH',
'BTOP',
'LEVERAGE',
'LIQUIDTY',
'SIZENL']
industry_styles = [
'Bank',
'RealEstate',
'Health',
'Transportation',
'Mining',
'NonFerMetal',
'HouseApp',
'LeiService',
'MachiEquip',
'BuildDeco',
'CommeTrade',
'CONMAT',
'Auto',
'Textile',
'FoodBever',
'Electronics',
'Computer',
'LightIndus',
'Utilities',
'Telecom',
'AgriForest',
'CHEM',
'Media',
'IronSteel',
'NonBankFinan',
'ELECEQP',
'AERODEF',
'Conglomerates'
]
macro_styles = ['COUNTRY']
total_risk_factors = risk_styles + industry_styles + macro_styles
DAILY_RETURN_OFFSET = 0
class SqlEngine:
def __init__(self, db_url: str, factor_tables: List[str] = None):
self._engine = sa.create_engine(db_url)
self._session = self.create_session()
if factor_tables:
self._factor_tables = [Table(name, metadata, autoload=True, autoload_with=self._engine)
for name in factor_tables]
else:
try:
factor_tables = os.environ["FACTOR_TABLES"]
self._factor_tables = [Table(name.strip(), metadata, autoload=True, autoload_with=self._engine)
for name in factor_tables.split(",")]
except KeyError:
self._factor_tables = []
self._factor_tables += [Table(name, metadata, autoload=True, autoload_with=self._engine)
for name in ["stk_daily_price_pro", "risk_exposure"] if name not in factor_tables]
self.ln_func = func.ln
def __del__(self):
if self._session:
self._session.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._session:
self._session.close()
@property
def engine(self):
return self._engine
@property
def session(self):
return self._session
def create_session(self):
db_session = orm.sessionmaker(bind=self.engine)
return db_session()
def fetch_factors_meta(self) -> pd.DataFrame:
query = self.session.query(FactorMaster)
return pd.read_sql(query.statement, query.session.bind)
def fetch_risk_meta(self) -> pd.DataFrame:
query = self.session.query(RiskMaster)
return pd.read_sql(query.statement, query.session.bind)
def fetch_codes(self, ref_date: str, universe: Universe) -> List[int]:
df = universe.query(self, ref_date, ref_date)
return sorted(df.code.tolist())
def fetch_codes_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None) -> pd.DataFrame:
return universe.query(self, start_date, end_date, dates)
def _create_stats(self, table, horizon, offset, code_attr='code'):
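        # (comment added) With DAILY_RETURN_OFFSET left at 0, the window sums
        # ln(1 + chgPct) over the rows from (1 + offset) to (1 + horizon + offset)
        # trading days ahead of the current row, i.e. the forward cumulative
        # log-return over horizon + 1 future days.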
stats = func.sum(self.ln_func(1. + table.chgPct)).over(
partition_by=getattr(table, code_attr),
order_by=table.trade_date,
rows=(
1 + DAILY_RETURN_OFFSET + offset, 1 + horizon + DAILY_RETURN_OFFSET + offset)).label(
'dx')
return stats
def fetch_dx_return(self,
ref_date: str,
codes: Iterable[int],
expiry_date: str = None,
horizon: int = 0,
offset: int = 0,
neutralized_risks: list = None,
pre_process=None,
post_process=None,
benchmark: int = None) -> pd.DataFrame:
start_date = ref_date
if not expiry_date:
end_date = advanceDateByCalendar('china.sse', ref_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y%m%d')
else:
end_date = expiry_date
stats = self._create_stats(Market, horizon, offset)
query = select([Market.trade_date, Market.code, stats]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(codes)
)
)
df = pd.read_sql(query, self.session.bind).dropna()
df = df[df.trade_date == ref_date]
if benchmark:
stats = self._create_stats(IndexMarket, horizon, offset, code_attr='indexCode')
query = select([IndexMarket.trade_date, stats]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark
)
)
df2 = pd.read_sql(query, self.session.bind).dropna()
ind_ret = df2[df2.trade_date == ref_date]['dx'].values[0]
df['dx'] = df['dx'] - ind_ret
if neutralized_risks:
_, risk_exp = self.fetch_risk_model(ref_date, codes)
df = pd.merge(df, risk_exp, on='code').dropna()
df[['dx']] = factor_processing(df[['dx']].values,
pre_process=pre_process,
risk_factors=df[neutralized_risks].values,
post_process=post_process)
return df[['code', 'dx']]
def fetch_dx_return_range(self,
universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
horizon: int = 0,
offset: int = 0,
benchmark: int = None) -> pd.DataFrame:
if dates:
start_date = dates[0]
end_date = dates[-1]
end_date = advanceDateByCalendar('china.sse', end_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y-%m-%d')
stats = self._create_stats(Market, horizon, offset)
codes = universe.query(self.engine, start_date, end_date, dates)
t = select([Market.trade_date, Market.code, stats]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(codes.code.unique().tolist())
)
).cte('t')
cond = universe._query_statements(start_date, end_date, dates)
query = select([t]).where(
and_(t.columns['trade_date'] == UniverseTable.trade_date,
t.columns['code'] == UniverseTable.code,
cond)
)
df = pd.read_sql(query, self.session.bind).dropna().set_index('trade_date')
if benchmark:
stats = self._create_stats(IndexMarket, horizon, offset, code_attr='indexCode')
query = select([IndexMarket.trade_date, stats]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark
)
)
df2 = pd.read_sql(query, self.session.bind).dropna().set_index('trade_date')
df['dx'] = df['dx'].values - df2.loc[df.index]['dx'].values
return df.reset_index().sort_values(['trade_date', 'code'])
def fetch_dx_return_index(self,
ref_date: str,
index_code: int,
expiry_date: str = None,
horizon: int = 0,
offset: int = 0) -> pd.DataFrame:
start_date = ref_date
if not expiry_date:
end_date = advanceDateByCalendar('china.sse', ref_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y%m%d')
else:
end_date = expiry_date
stats = self._create_stats(IndexMarket, horizon, offset, code_attr='indexCode')
query = select([IndexMarket.trade_date, IndexMarket.indexCode.label('code'), stats]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == index_code
)
)
df = pd.read_sql(query, self.session.bind).dropna()
df = df[df.trade_date == ref_date]
return df[['code', 'dx']]
def fetch_dx_return_index_range(self,
index_code,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
horizon: int = 0,
offset: int = 0) -> pd.DataFrame:
if dates:
start_date = dates[0]
end_date = dates[-1]
end_date = advanceDateByCalendar('china.sse', end_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y-%m-%d')
stats = self._create_stats(IndexMarket, horizon, offset, code_attr='indexCode')
query = select([IndexMarket.trade_date, IndexMarket.indexCode.label('code'), stats]) \
.where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == index_code
)
)
df = pd.read_sql(query, self.session.bind).dropna()
if dates:
df = df[df.trade_date.isin(dates)]
return df
def fetch_factor(self,
ref_date: str,
factors: Iterable[object],
codes: Iterable[int],
warm_start: int = 0) -> pd.DataFrame:
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
dependency = transformer.dependency
factor_cols = _map_factors(dependency, self._factor_tables)
start_date = advanceDateByCalendar('china.sse', ref_date, str(-warm_start) + 'b').strftime(
'%Y-%m-%d')
end_date = ref_date
big_table = Market
joined_tables = set()
joined_tables.add(Market.__table__.name)
for t in set(factor_cols.values()):
if t.name not in joined_tables:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["code"]))
joined_tables.add(t.name)
query = select(
[Market.trade_date, Market.code, Market.chgPct, Market.secShortName] + list(
column(k) for k in factor_cols.keys())) \
.select_from(big_table).where(and_(Market.trade_date.between(start_date, end_date),
Market.code.in_(codes)))
df = pd.read_sql(query, self.engine) \
.replace([-np.inf, np.inf], np.nan) \
.sort_values(['trade_date', 'code']) \
.set_index('trade_date')
res = transformer.transform('code', df).replace([-np.inf, np.inf], np.nan)
res['chgPct'] = df.chgPct
res['secShortName'] = df['secShortName']
res = res.loc[ref_date:ref_date, :]
res.index = list(range(len(res)))
return res
def fetch_factor_range(self,
universe: Universe,
factors: Union[Transformer, Iterable[object]],
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
external_data: pd.DataFrame = None) -> pd.DataFrame:
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
dependency = transformer.dependency
factor_cols = _map_factors(dependency, self._factor_tables)
big_table = Market
joined_tables = set()
joined_tables.add(Market.__table__.name)
for t in set(factor_cols.values()):
if t.name not in joined_tables:
if dates is not None:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["code"],
Market.trade_date.in_(dates)))
else:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["code"],
Market.trade_date.between(start_date,
end_date)))
joined_tables.add(t.name)
universe_df = universe.query(self, start_date, end_date, dates)
query = select(
[Market.trade_date, Market.code, Market.chgPct, Market.secShortName] + list(
column(k) for k in factor_cols.keys())) \
.select_from(big_table).where(
and_(
Market.code.in_(universe_df.code.unique().tolist()),
Market.trade_date.in_(dates) if dates is not None else Market.trade_date.between(
start_date, end_date)
)
).distinct()
df = pd.read_sql(query, self.engine).replace([-np.inf, np.inf], np.nan)
if external_data is not None:
df = pd.merge(df, external_data, on=['trade_date', 'code']).dropna()
df.sort_values(['trade_date', 'code'], inplace=True)
df.set_index('trade_date', inplace=True)
res = transformer.transform('code', df).replace([-np.inf, np.inf], np.nan)
res['chgPct'] = df.chgPct
res['secShortName'] = df['secShortName']
res = res.reset_index()
return pd.merge(res, universe_df[['trade_date', 'code']], how='inner').drop_duplicates(
['trade_date', 'code'])
def fetch_factor_range_forward(self,
universe: Universe,
factors: Union[Transformer, object],
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None):
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
dependency = transformer.dependency
factor_cols = _map_factors(dependency, self._factor_tables)
codes = universe.query(self, start_date, end_date, dates)
total_codes = codes.code.unique().tolist()
total_dates = codes.trade_date.astype(str).unique().tolist()
big_table = Market
joined_tables = set()
joined_tables.add(Market.__table__.name)
for t in set(factor_cols.values()):
if t.name not in joined_tables:
if dates is not None:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["code"],
Market.trade_date.in_(dates)))
else:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["code"],
Market.trade_date.between(start_date,
end_date)))
joined_tables.add(t.name)
stats = func.lag(list(factor_cols.keys())[0], -1).over(
partition_by=Market.code,
order_by=Market.trade_date).label('dx')
query = select([Market.trade_date, Market.code, Market.chgPct, stats]).select_from(
big_table).where(
and_(
Market.trade_date.in_(total_dates),
Market.code.in_(total_codes)
)
)
df = pd.read_sql(query, self.engine) \
.replace([-np.inf, np.inf], np.nan) \
.sort_values(['trade_date', 'code'])
return pd.merge(df, codes[['trade_date', 'code']], how='inner').drop_duplicates(
['trade_date', 'code'])
def fetch_benchmark(self,
ref_date: str,
benchmark: int,
codes: Iterable[int] = None) -> pd.DataFrame:
query = select([IndexComponent.code, (IndexComponent.weight / 100.).label('weight')]).where(
and_(
IndexComponent.trade_date == ref_date,
IndexComponent.indexCode == benchmark
)
)
df = pd.read_sql(query, self.engine)
if codes:
df.set_index(['code'], inplace=True)
df = df.reindex(codes).fillna(0.)
df.reset_index(inplace=True)
return df
def fetch_benchmark_range(self,
benchmark: int,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None) -> pd.DataFrame:
cond = IndexComponent.trade_date.in_(dates) if dates else IndexComponent.trade_date.between(
start_date,
end_date)
query = select(
[IndexComponent.trade_date, IndexComponent.code,
(IndexComponent.weight / 100.).label('weight')]).where(
and_(
cond,
IndexComponent.indexCode == benchmark
)
)
return pd.read_sql(query, self.engine)
def fetch_risk_model(self,
ref_date: str,
codes: Iterable[int],
risk_model: str = 'short',
excluded: Iterable[str] = None,
model_type: str = None) -> Union[
FactorRiskModel, Tuple[pd.DataFrame, pd.DataFrame]]:
risk_cov_table, special_risk_table = _map_risk_model_table(risk_model)
cov_risk_cols = [risk_cov_table.__table__.columns[f] for f in total_risk_factors]
query = select([risk_cov_table.FactorID,
risk_cov_table.Factor]
+ cov_risk_cols).where(
risk_cov_table.trade_date == ref_date
)
risk_cov = pd.read_sql(query, self.engine).sort_values('FactorID')
if excluded:
risk_exposure_cols = [RiskExposure.__table__.columns[f] for f in total_risk_factors if
f not in set(excluded)]
else:
risk_exposure_cols = [RiskExposure.__table__.columns[f] for f in total_risk_factors]
big_table = join(RiskExposure,
special_risk_table,
and_(
RiskExposure.code == special_risk_table.code,
RiskExposure.trade_date == special_risk_table.trade_date
))
query = select(
[RiskExposure.code, special_risk_table.SRISK.label('srisk')] + risk_exposure_cols) \
.select_from(big_table).where(
and_(RiskExposure.trade_date == ref_date,
RiskExposure.code.in_(codes)
))
risk_exp = pd.read_sql(query, self.engine).dropna()
if not model_type:
return risk_cov, risk_exp
elif model_type == 'factor':
factor_names = risk_cov.Factor.tolist()
new_risk_cov = risk_cov.set_index('Factor')
factor_cov = new_risk_cov.loc[factor_names, factor_names] / 10000.
new_risk_exp = risk_exp.set_index('code')
factor_loading = new_risk_exp.loc[:, factor_names]
idsync = new_risk_exp['srisk'] * new_risk_exp['srisk'] / 10000
return FactorRiskModel(factor_cov, factor_loading, idsync), risk_cov, risk_exp
def fetch_risk_model_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
risk_model: str = 'short',
excluded: Iterable[str] = None,
model_type: str = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
risk_cov_table, special_risk_table = _map_risk_model_table(risk_model)
cov_risk_cols = [risk_cov_table.__table__.columns[f] for f in total_risk_factors]
cond = risk_cov_table.trade_date.in_(dates) if dates else risk_cov_table.trade_date.between(
start_date,
end_date)
query = select([risk_cov_table.trade_date,
risk_cov_table.FactorID,
risk_cov_table.Factor]
+ cov_risk_cols).where(
cond
)
risk_cov = pd.read_sql(query, self.engine).sort_values(['trade_date', 'FactorID'])
if not excluded:
excluded = []
risk_exposure_cols = [RiskExposure.__table__.columns[f] for f in total_risk_factors if
f not in set(excluded)]
cond = universe._query_statements(start_date, end_date, dates)
big_table = join(RiskExposure, UniverseTable,
and_(
RiskExposure.trade_date == UniverseTable.trade_date,
RiskExposure.code == UniverseTable.code,
cond
)
)
big_table = join(special_risk_table,
big_table,
and_(
RiskExposure.code == special_risk_table.code,
RiskExposure.trade_date == special_risk_table.trade_date,
))
query = select(
[RiskExposure.trade_date,
RiskExposure.code,
special_risk_table.SRISK.label('srisk')] + risk_exposure_cols).select_from(big_table) \
.distinct()
risk_exp = pd.read_sql(query, self.engine).sort_values(['trade_date', 'code']).dropna()
if not model_type:
return risk_cov, risk_exp
elif model_type == 'factor':
new_risk_cov = risk_cov.set_index('Factor')
new_risk_exp = risk_exp.set_index('code')
risk_cov_groups = new_risk_cov.groupby('trade_date')
risk_exp_groups = new_risk_exp.groupby('trade_date')
models = dict()
for ref_date, cov_g in risk_cov_groups:
exp_g = risk_exp_groups.get_group(ref_date)
factor_names = cov_g.index.tolist()
factor_cov = cov_g.loc[factor_names, factor_names] / 10000.
factor_loading = exp_g.loc[:, factor_names]
idsync = exp_g['srisk'] * exp_g['srisk'] / 10000
models[ref_date] = FactorRiskModel(factor_cov, factor_loading, idsync)
return pd.Series(models), risk_cov, risk_exp
def fetch_industry(self,
ref_date: str,
codes: Iterable[int] = None,
category: str = 'sw',
level: int = 1):
industry_category_name = _map_industry_category(category)
code_name = 'industryID' + str(level)
category_name = 'industryName' + str(level)
cond = and_(
Industry.trade_date == ref_date,
Industry.code.in_(codes),
Industry.industry == industry_category_name
) if codes else and_(
Industry.trade_date == ref_date,
Industry.industry == industry_category_name
)
query = select([Industry.code,
getattr(Industry, code_name).label('industry_code'),
getattr(Industry, category_name).label('industry')]).where(
cond
).distinct()
return pd.read_sql(query, self.engine).dropna().drop_duplicates(['code'])
def fetch_industry_matrix(self,
ref_date: str,
codes: Iterable[int] = None,
category: str = 'sw',
level: int = 1):
df = self.fetch_industry(ref_date, codes, category, level)
df['industry_name'] = df['industry']
df = pd.get_dummies(df, columns=['industry'], prefix="", prefix_sep="")
return df.drop('industry_code', axis=1)
def fetch_industry_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
category: str = 'sw',
level: int = 1):
industry_category_name = _map_industry_category(category)
cond = universe._query_statements(start_date, end_date, dates)
big_table = join(Industry, UniverseTable,
and_(
Industry.trade_date == UniverseTable.trade_date,
Industry.code == UniverseTable.code,
Industry.industry == industry_category_name,
cond
))
code_name = 'industryID' + str(level)
category_name = 'industryName' + str(level)
query = select([Industry.trade_date,
Industry.code,
getattr(Industry, code_name).label('industry_code'),
getattr(Industry, category_name).label('industry')]).select_from(big_table) \
.order_by(Industry.trade_date, Industry.code)
return pd.read_sql(query, self.engine).dropna()
def fetch_industry_matrix_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
category: str = 'sw',
level: int = 1):
df = self.fetch_industry_range(universe, start_date, end_date, dates, category, level)
df['industry_name'] = df['industry']
df = pd.get_dummies(df, columns=['industry'], prefix="", prefix_sep="")
return df.drop('industry_code', axis=1).drop_duplicates(['trade_date', 'code'])
def fetch_trade_status(self,
ref_date: str,
codes: Iterable[int],
offset=0):
target_date = advanceDateByCalendar('china.sse', ref_date,
str(offset) + 'b').strftime('%Y%m%d')
stats = func.lead(Market.isOpen, 1).over(
partition_by=Market.code,
order_by=Market.trade_date).label('is_open')
cte = select([Market.trade_date, Market.code, stats]).where(
and_(
Market.trade_date.in_([ref_date, target_date]),
Market.code.in_(codes)
)
).cte('cte')
query = select([column('code'), column('is_open')]).select_from(cte).where(
column('trade_date') == ref_date
).order_by(column('code'))
return pd.read_sql(query, self.engine).sort_values(['code'])
def fetch_trade_status_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
offset=0):
codes = universe.query(self, start_date, end_date, dates)
if dates:
start_date = dates[0]
end_date = dates[-1]
end_date = advanceDateByCalendar('china.sse', end_date,
str(offset) + 'b').strftime('%Y-%m-%d')
stats = func.lead(Market.isOpen, offset).over(
partition_by=Market.code,
order_by=Market.trade_date).label('is_open')
cte = select([Market.trade_date, Market.code, stats]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(codes.code.unique().tolist())
)
).cte('cte')
query = select([cte]).select_from(cte).order_by(cte.columns['trade_date'],
cte.columns['code'])
df = pd.read_sql(query, self.engine)
return pd.merge(df, codes[['trade_date', 'code']], on=['trade_date', 'code'])
def fetch_data(self,
ref_date: str,
factors: Iterable[str],
codes: Iterable[int],
benchmark: int = None,
risk_model: str = 'short',
industry: str = 'sw') -> Dict[str, pd.DataFrame]:
total_data = {}
transformer = Transformer(factors)
factor_data = self.fetch_factor(ref_date,
transformer,
codes)
if benchmark:
benchmark_data = self.fetch_benchmark(ref_date, benchmark)
total_data['benchmark'] = benchmark_data
factor_data = pd.merge(factor_data, benchmark_data, how='left', on=['code'])
factor_data['weight'] = factor_data['weight'].fillna(0.)
if risk_model:
excluded = list(set(total_risk_factors).intersection(transformer.dependency))
risk_cov, risk_exp = self.fetch_risk_model(ref_date, codes, risk_model, excluded)
factor_data = pd.merge(factor_data, risk_exp, how='left', on=['code'])
total_data['risk_cov'] = risk_cov
industry_info = self.fetch_industry(ref_date=ref_date,
codes=codes,
category=industry)
factor_data = pd.merge(factor_data, industry_info, on=['code'])
total_data['factor'] = factor_data
return total_data
def fetch_data_range(self,
universe: Universe,
factors: Iterable[str],
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
benchmark: int = None,
risk_model: str = 'short',
industry: str = 'sw',
external_data: pd.DataFrame = None) -> Dict[str, pd.DataFrame]:
total_data = {}
transformer = Transformer(factors)
factor_data = self.fetch_factor_range(universe,
transformer,
start_date,
end_date,
dates,
external_data=external_data)
if benchmark:
benchmark_data = self.fetch_benchmark_range(benchmark, start_date, end_date, dates)
total_data['benchmark'] = benchmark_data
factor_data = pd.merge(factor_data, benchmark_data, how='left',
on=['trade_date', 'code'])
factor_data['weight'] = factor_data['weight'].fillna(0.)
if risk_model:
excluded = list(set(total_risk_factors).intersection(transformer.dependency))
risk_cov, risk_exp = self.fetch_risk_model_range(universe, start_date, end_date, dates,
risk_model,
excluded)
factor_data = pd.merge(factor_data, risk_exp, how='left', on=['trade_date', 'code'])
total_data['risk_cov'] = risk_cov
industry_info = self.fetch_industry_range(universe,
start_date=start_date,
end_date=end_date,
dates=dates,
category=industry)
factor_data = pd.merge(factor_data, industry_info, on=['trade_date', 'code'])
total_data['factor'] = factor_data
return total_data
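if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file). Everything below is an
    # assumption: a reachable PostgreSQL DSN, a factor table named 'uqer', a
    # universe column named 'hs300' and a factor column named 'EMA5D'.
    engine = SqlEngine('postgresql+psycopg2://user:pwd@localhost/alpha',
                       factor_tables=['uqer'])
    universe = Universe('hs300')
    ref_date = '2020-01-02'
    codes = engine.fetch_codes(ref_date, universe)
    factor_df = engine.fetch_factor(ref_date, ['EMA5D'], codes)
    return_df = engine.fetch_dx_return(ref_date, codes, horizon=4, offset=1)
    print(pd.merge(factor_df, return_df, on='code').head())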
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/engines/sqlengine/postgres.py
|
postgres.py
|
import os
from typing import Iterable
from typing import List
from typing import Tuple
from typing import Union
from typing import Dict
import numpy as np
import pandas as pd
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy import (
and_,
column,
join,
select,
outerjoin,
Table
)
from PyFin.api import advanceDateByCalendar
from alphamind.data.dbmodel.models.mysql import (
metadata,
Market,
IndexMarket,
Industry,
RiskExposure,
Universe as UniverseTable,
IndexComponent,
IndexWeight,
)
from alphamind.data.engines.utilities import _map_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.engines.utilities import _map_risk_model_table
from alphamind.portfolio.riskmodel import FactorRiskModel
from alphamind.data.transformer import Transformer
risk_styles = ['BETA',
'MOMENTUM',
'SIZE',
'EARNYILD',
'RESVOL',
'GROWTH',
'BTOP',
'LEVERAGE',
'LIQUIDTY',
'SIZENL']
industry_styles = [
'Bank',
'RealEstate',
'Health',
'Transportation',
'Mining',
'NonFerMetal',
'HouseApp',
'LeiService',
'MachiEquip',
'BuildDeco',
'CommeTrade',
'CONMAT',
'Auto',
'Textile',
'FoodBever',
'Electronics',
'Computer',
'LightIndus',
'Utilities',
'Telecom',
'AgriForest',
'CHEM',
'Media',
'IronSteel',
'NonBankFinan',
'ELECEQP',
'AERODEF',
'Conglomerates'
]
macro_styles = ['COUNTRY']
total_risk_factors = risk_styles + industry_styles + macro_styles
_map_index_codes = {
300: "2070000060",
905: "2070000187",
}
DAILY_RETURN_OFFSET = 0
class SqlEngine:
def __init__(self, db_url: str, factor_tables: List[str] = None):
self._engine = sa.create_engine(db_url)
self._session = self.create_session()
if factor_tables:
self._factor_tables = [Table(name, metadata, autoload=True, autoload_with=self._engine)
for name in factor_tables]
else:
try:
factor_tables = os.environ["FACTOR_TABLES"]
self._factor_tables = [Table(name.strip(), metadata, autoload=True, autoload_with=self._engine)
for name in factor_tables.split(",")]
except KeyError:
self._factor_tables = []
self._factor_tables += [Table(name, metadata, autoload=True, autoload_with=self._engine)
for name in ["stk_daily_price_pro", "risk_exposure"] if name not in factor_tables]
def __del__(self):
if self._session:
self._session.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._session:
self._session.close()
@property
def engine(self):
return self._engine
@property
def session(self):
return self._session
def create_session(self):
db_session = orm.sessionmaker(bind=self._engine)
return db_session()
def _create_stats(self, df, horizon, offset, no_code=False):
df["trade_date"] = pd.to_datetime(df["trade_date"])
df.set_index("trade_date", inplace=True)
df["dx"] = np.log(1. + df["chgPct"] / 100.)
if not no_code:
df = df.groupby("code").rolling(window=horizon + 1)['dx'].sum() \
.groupby(level=0).shift(-(horizon + offset + 1)).dropna().reset_index()
else:
df = df.rolling(window=horizon + 1)['dx'].sum().shift(-(horizon + offset + 1)).dropna().reset_index()
return df
def fetch_dx_return(self,
ref_date: str,
codes: Iterable[int],
expiry_date: str = None,
horizon: int = 0,
offset: int = 0,
neutralized_risks: list = None,
pre_process=None,
post_process=None,
benchmark: int = None) -> pd.DataFrame:
start_date = ref_date
if not expiry_date:
end_date = advanceDateByCalendar('china.sse', ref_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y-%m-%d')
else:
end_date = expiry_date
query = select([Market.trade_date, Market.code.label("code"), Market.chgPct.label("chgPct")]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(codes),
Market.flag == 1
)
).order_by(Market.trade_date, Market.code)
df = pd.read_sql(query, self.session.bind).dropna()
df = self._create_stats(df, horizon, offset)
df = df[df.trade_date == ref_date]
if benchmark:
benchmark = _map_index_codes[benchmark]
query = select([IndexMarket.trade_date, IndexMarket.chgPct.label("chgPct")]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark,
IndexMarket.flag == 1
)
)
df2 = pd.read_sql(query, self.session.bind).dropna()
df2 = self._create_stats(df2, horizon, offset, no_code=True)
ind_ret = df2[df2.trade_date == ref_date]['dx'].values[0]
df['dx'] = df['dx'] - ind_ret
if neutralized_risks:
_, risk_exp = self.fetch_risk_model(ref_date, codes)
df = pd.merge(df, risk_exp, on='code').dropna()
df[['dx']] = factor_processing(df[['dx']].values,
pre_process=pre_process,
risk_factors=df[neutralized_risks].values,
post_process=post_process)
return df[['code', 'dx']]
def fetch_dx_return_index(self,
ref_date: str,
index_code: int,
expiry_date: str = None,
horizon: int = 0,
offset: int = 0) -> pd.DataFrame:
start_date = ref_date
index_code = _map_index_codes[index_code]
if not expiry_date:
end_date = advanceDateByCalendar('china.sse', ref_date,
str(1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y%m%d')
else:
end_date = expiry_date
query = select([IndexMarket.trade_date,
IndexMarket.indexCode.label('code'),
IndexMarket.chgPct.label("chgPct")]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == index_code,
IndexMarket.flag == 1
)
).order_by(IndexMarket.trade_date, IndexMarket.indexCode)
df = pd.read_sql(query, self.session.bind).dropna()
df = self._create_stats(df, horizon, offset)
df = df[df.trade_date == ref_date]
return df[['code', 'dx']]
def fetch_dx_return_range(self,
universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
horizon: int = 0,
offset: int = 0,
benchmark: int = None) -> pd.DataFrame:
if dates:
start_date = dates[0]
end_date = dates[-1]
end_date = advanceDateByCalendar('china.sse', end_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y-%m-%d')
codes = universe.query(self.engine, start_date, end_date, dates)
t1 = select([Market.trade_date, Market.code.label("code"), Market.chgPct.label("chgPct")]).where(
and_(
Market.trade_date.between(start_date, end_date),
Market.code.in_(codes.code.unique().tolist()),
Market.flag == 1
)
)
df1 = pd.read_sql(t1, self.session.bind).dropna()
df1 = self._create_stats(df1, horizon, offset)
df2 = self.fetch_codes_range(universe, start_date, end_date, dates)
df2["trade_date"] = pd.to_datetime(df2["trade_date"])
df = pd.merge(df1, df2, on=["trade_date", "code"])
df = df.set_index("trade_date")
if benchmark:
benchmark = _map_index_codes[benchmark]
query = select([IndexMarket.trade_date,
IndexMarket.chgPct.label("chgPct")]).where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == benchmark,
IndexMarket.flag == 1
)
)
df2 = pd.read_sql(query, self.session.bind).dropna().drop_duplicates(["trade_date"])
df2 = self._create_stats(df2, horizon, offset, no_code=True).set_index("trade_date")
df['dx'] = df['dx'].values - df2.loc[df.index]['dx'].values
if dates:
df = df[df.index.isin(dates)]
return df.reset_index().sort_values(['trade_date', 'code'])
def fetch_dx_return_index_range(self,
index_code,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
horizon: int = 0,
offset: int = 0) -> pd.DataFrame:
if dates:
start_date = dates[0]
end_date = dates[-1]
index_code = _map_index_codes[index_code]
end_date = advanceDateByCalendar('china.sse', end_date,
str(
1 + horizon + offset + DAILY_RETURN_OFFSET) + 'b').strftime(
'%Y-%m-%d')
query = select([IndexMarket.trade_date,
IndexMarket.indexCode.label('code'),
IndexMarket.chgPct.label("chgPct")]) \
.where(
and_(
IndexMarket.trade_date.between(start_date, end_date),
IndexMarket.indexCode == index_code,
IndexMarket.flag == 1
)
)
df = pd.read_sql(query, self.session.bind).dropna().drop_duplicates(["trade_date", "code"])
df = self._create_stats(df, horizon, offset)
if dates:
df = df[df.trade_date.isin(dates)]
return df
def fetch_codes(self, ref_date: str, universe: Universe) -> List[int]:
df = universe.query(self, ref_date, ref_date)
return sorted(df.code.tolist())
def fetch_codes_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None) -> pd.DataFrame:
return universe.query(self, start_date, end_date, dates)
def fetch_factor(self,
ref_date: str,
factors: Iterable[object],
codes: Iterable[int],
warm_start: int = 0) -> pd.DataFrame:
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
dependency = transformer.dependency
factor_cols = _map_factors(dependency, self._factor_tables)
start_date = advanceDateByCalendar('china.sse', ref_date, str(-warm_start) + 'b').strftime(
'%Y-%m-%d')
end_date = ref_date
big_table = Market
joined_tables = set()
joined_tables.add(Market.__table__.name)
for t in set(factor_cols.values()):
if t.name not in joined_tables:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["security_code"],
Market.flag == 1,
t.columns["flag"] == 1))
joined_tables.add(t.name)
query = select(
[Market.trade_date, Market.code.label("code"),
Market.chgPct.label("chgPct"),
Market.secShortName.label("secShortName")] + list(
column(k) for k in factor_cols.keys())) \
.select_from(big_table).where(and_(Market.trade_date.between(start_date, end_date),
Market.code.in_(codes),
Market.flag == 1))
df = pd.read_sql(query, self.engine) \
.replace([-np.inf, np.inf], np.nan) \
.sort_values(['trade_date', 'code']) \
.drop_duplicates(["trade_date", "code"]) \
.set_index('trade_date')
res = transformer.transform('code', df).replace([-np.inf, np.inf], np.nan)
res['chgPct'] = df.chgPct
res['secShortName'] = df['secShortName']
res.index = pd.to_datetime(res.index)
res = res.loc[ref_date:ref_date, :]
res.index = list(range(len(res)))
return res
def fetch_factor_range(self,
universe: Universe,
factors: Union[Transformer, Iterable[object]],
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
external_data: pd.DataFrame = None) -> pd.DataFrame:
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
dependency = transformer.dependency
factor_cols = _map_factors(dependency, self._factor_tables)
big_table = Market
joined_tables = set()
joined_tables.add(Market.__table__.name)
for t in set(factor_cols.values()):
if t.name not in joined_tables:
if dates is not None:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["security_code"],
Market.trade_date.in_(dates),
Market.flag == 1,
t.columns["flag"] == 1))
else:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.columns["trade_date"],
Market.code == t.columns["security_code"],
Market.trade_date.between(start_date,
end_date),
Market.flag == 1,
t.columns["flag"] == 1))
joined_tables.add(t.name)
universe_df = universe.query(self, start_date, end_date, dates)
query = select(
[Market.trade_date,
Market.code.label("code"),
Market.chgPct.label("chgPct"),
Market.secShortName.label("secShortName")] + list(
column(k) for k in factor_cols.keys())) \
.select_from(big_table).where(
and_(
Market.code.in_(universe_df.code.unique().tolist()),
Market.trade_date.in_(dates) if dates is not None else Market.trade_date.between(
start_date, end_date),
Market.flag == 1
)
).distinct()
df = pd.read_sql(query, self.engine).replace([-np.inf, np.inf], np.nan).drop_duplicates(["trade_date", "code"])
if external_data is not None:
df = pd.merge(df, external_data, on=['trade_date', 'code']).dropna()
df = df.sort_values(["trade_date", "code"]).drop_duplicates(subset=["trade_date", "code"])
df.set_index('trade_date', inplace=True)
res = transformer.transform('code', df).replace([-np.inf, np.inf], np.nan)
res['chgPct'] = df.chgPct
res['secShortName'] = df['secShortName']
res = res.reset_index()
res["trade_date"] = pd.to_datetime(res["trade_date"])
return pd.merge(res, universe_df[['trade_date', 'code']], how='inner').drop_duplicates(
['trade_date', 'code'])
def fetch_industry(self,
ref_date: str,
codes: Iterable[int] = None,
category: str = 'sw',
level: int = 1):
code_name = 'industry_code' + str(level)
category_name = 'industry_name' + str(level)
cond = and_(
Industry.trade_date == ref_date,
Industry.code.in_(codes),
Industry.flag == 1
) if codes else and_(
Industry.trade_date == ref_date,
Industry.flag == 1
)
query = select([Industry.code.label("code"),
getattr(Industry, code_name).label('industry_code'),
getattr(Industry, category_name).label('industry')]).where(
cond
).distinct()
return pd.read_sql(query, self.engine).dropna().drop_duplicates(['code'])
def fetch_industry_matrix(self,
ref_date: str,
codes: Iterable[int] = None,
category: str = 'sw',
level: int = 1):
df = self.fetch_industry(ref_date, codes, category, level)
df['industry_name'] = df['industry']
df = pd.get_dummies(df, columns=['industry'], prefix="", prefix_sep="")
return df.drop('industry_code', axis=1)
def fetch_industry_matrix_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
category: str = 'sw',
level: int = 1):
df = self.fetch_industry_range(universe, start_date, end_date, dates, category, level)
df['industry_name'] = df['industry']
df = pd.get_dummies(df, columns=['industry'], prefix="", prefix_sep="")
return df.drop('industry_code', axis=1).drop_duplicates(['trade_date', 'code'])
def fetch_industry_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
category: str = 'sw',
level: int = 1):
code_name = 'industry_code' + str(level)
category_name = 'industry_name' + str(level)
cond = universe._query_statements(start_date, end_date, dates)
query = select([Industry.code.label("code"),
Industry.trade_date,
getattr(Industry, code_name).label('industry_code'),
getattr(Industry, category_name).label('industry')]).where(
and_(
*cond,
Industry.code == UniverseTable.code,
Industry.trade_date == UniverseTable.trade_date,
Industry.flag == 1
)
).distinct()
df = pd.read_sql(query, self.session.bind)
df["trade_date"] = pd.to_datetime(df["trade_date"])
return df
def fetch_risk_model(self,
ref_date: str,
codes: Iterable[int],
risk_model: str = 'short',
excluded: Iterable[str] = None,
model_type: str = None) -> Union[
FactorRiskModel, Tuple[pd.DataFrame, pd.DataFrame]]:
risk_cov_table, special_risk_table = _map_risk_model_table(risk_model)
cov_risk_cols = [risk_cov_table.__table__.columns[f] for f in total_risk_factors]
query = select([risk_cov_table.FactorID,
risk_cov_table.Factor]
+ cov_risk_cols).where(
risk_cov_table.trade_date == ref_date
)
risk_cov = pd.read_sql(query, self.engine).sort_values('FactorID')
if excluded:
risk_exposure_cols = [RiskExposure.__table__.columns[f] for f in total_risk_factors if
f not in set(excluded)]
else:
risk_exposure_cols = [RiskExposure.__table__.columns[f] for f in total_risk_factors]
big_table = join(RiskExposure,
special_risk_table,
and_(
RiskExposure.code == special_risk_table.code,
RiskExposure.trade_date == special_risk_table.trade_date
))
query = select(
[RiskExposure.code.label("code"), special_risk_table.SRISK.label('srisk')] + risk_exposure_cols) \
.select_from(big_table).where(
and_(RiskExposure.trade_date == ref_date,
RiskExposure.code.in_(codes),
RiskExposure.flag == 1
))
risk_exp = pd.read_sql(query, self.engine).dropna().drop_duplicates(subset=["code"])
if not model_type:
return risk_cov, risk_exp
elif model_type == 'factor':
factor_names = risk_cov.Factor.tolist()
new_risk_cov = risk_cov.set_index('Factor')
factor_cov = new_risk_cov.loc[factor_names, factor_names] / 10000.
new_risk_exp = risk_exp.set_index('code')
factor_loading = new_risk_exp.loc[:, factor_names]
idsync = new_risk_exp['srisk'] * new_risk_exp['srisk'] / 10000
return FactorRiskModel(factor_cov, factor_loading, idsync), risk_cov, risk_exp
def fetch_risk_model_range(self,
universe: Universe,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
risk_model: str = 'short',
excluded: Iterable[str] = None,
model_type: str = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
risk_cov_table, special_risk_table = _map_risk_model_table(risk_model)
cov_risk_cols = [risk_cov_table.__table__.columns[f] for f in total_risk_factors]
cond = risk_cov_table.trade_date.in_(dates) if dates else risk_cov_table.trade_date.between(
start_date,
end_date)
query = select([risk_cov_table.trade_date,
risk_cov_table.FactorID,
risk_cov_table.Factor]
+ cov_risk_cols).where(
cond
)
risk_cov = pd.read_sql(query, self.engine).sort_values(['trade_date', 'FactorID'])
risk_cov["trade_date"] = pd.to_datetime(risk_cov["trade_date"])
if not excluded:
excluded = []
risk_exposure_cols = [RiskExposure.__table__.columns[f] for f in total_risk_factors if
f not in set(excluded)]
cond = universe._query_statements(start_date, end_date, dates)
big_table = join(RiskExposure, UniverseTable,
and_(
RiskExposure.trade_date == UniverseTable.trade_date,
RiskExposure.code == UniverseTable.code,
RiskExposure.flag == 1,
cond
)
)
big_table = join(special_risk_table,
big_table,
and_(
RiskExposure.code == special_risk_table.code,
RiskExposure.trade_date == special_risk_table.trade_date,
))
query = select(
[RiskExposure.trade_date,
RiskExposure.code.label("code"),
special_risk_table.SRISK.label('srisk')] + risk_exposure_cols).select_from(big_table) \
.distinct()
risk_exp = pd.read_sql(query, self.engine).sort_values(['trade_date', 'code']) \
.dropna().drop_duplicates(["trade_date", "code"])
risk_exp["trade_date"] = pd.to_datetime(risk_exp["trade_date"])
if not model_type:
return risk_cov, risk_exp
elif model_type == 'factor':
new_risk_cov = risk_cov.set_index('Factor')
new_risk_exp = risk_exp.set_index('code')
risk_cov_groups = new_risk_cov.groupby('trade_date')
risk_exp_groups = new_risk_exp.groupby('trade_date')
models = dict()
for ref_date, cov_g in risk_cov_groups:
exp_g = risk_exp_groups.get_group(ref_date)
factor_names = cov_g.index.tolist()
factor_cov = cov_g.loc[factor_names, factor_names] / 10000.
factor_loading = exp_g.loc[:, factor_names]
idsync = exp_g['srisk'] * exp_g['srisk'] / 10000
models[ref_date] = FactorRiskModel(factor_cov, factor_loading, idsync)
return pd.Series(models), risk_cov, risk_exp
def fetch_data(self,
ref_date: str,
factors: Iterable[str],
codes: Iterable[int],
benchmark: int = None,
risk_model: str = 'short',
industry: str = 'sw') -> Dict[str, pd.DataFrame]:
total_data = dict()
transformer = Transformer(factors)
factor_data = self.fetch_factor(ref_date,
transformer,
codes,
used_factor_tables=factor_tables)
if benchmark:
benchmark_data = self.fetch_benchmark(ref_date, benchmark)
total_data['benchmark'] = benchmark_data
factor_data = pd.merge(factor_data, benchmark_data, how='left', on=['code'])
factor_data['weight'] = factor_data['weight'].fillna(0.)
if risk_model:
excluded = list(set(total_risk_factors).intersection(transformer.dependency))
risk_cov, risk_exp = self.fetch_risk_model(ref_date, codes, risk_model, excluded)
factor_data = pd.merge(factor_data, risk_exp, how='left', on=['code'])
total_data['risk_cov'] = risk_cov
industry_info = self.fetch_industry(ref_date=ref_date,
codes=codes,
category=industry)
factor_data = pd.merge(factor_data, industry_info, on=['code'])
total_data['factor'] = factor_data
return total_data
def fetch_benchmark(self,
ref_date: str,
benchmark: int,
codes: Iterable[int] = None) -> pd.DataFrame:
benchmark = _map_index_codes[benchmark]
big_table = join(IndexComponent, IndexWeight,
and_(
IndexComponent.trade_date == IndexWeight.trade_date,
IndexComponent.indexSymbol == IndexWeight.indexSymbol,
IndexComponent.symbol == IndexWeight.symbol,
IndexComponent.flag == 1,
IndexWeight.flag == 1
)
)
query = select(
[IndexComponent.code.label("code"),
(IndexWeight.weight / 100.).label('weight')]).select_from(big_table). \
where(
and_(
IndexComponent.trade_date == ref_date,
IndexComponent.indexCode == benchmark,
)
).distinct()
df = pd.read_sql(query, self.engine).drop_duplicates(subset=["code"])
if codes:
df.set_index(['code'], inplace=True)
df = df.reindex(codes).fillna(0.)
df.reset_index(inplace=True)
return df
def fetch_benchmark_range(self,
benchmark: int,
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None) -> pd.DataFrame:
cond = IndexComponent.trade_date.in_(dates) if dates else IndexComponent.trade_date.between(
start_date,
end_date)
benchmark = _map_index_codes[benchmark]
big_table = join(IndexComponent, IndexWeight,
and_(
IndexComponent.trade_date == IndexWeight.trade_date,
IndexComponent.indexSymbol == IndexWeight.indexSymbol,
IndexComponent.symbol == IndexWeight.symbol,
IndexComponent.flag == 1,
IndexWeight.flag == 1
)
)
query = select(
[IndexComponent.trade_date,
IndexComponent.code.label("code"),
(IndexWeight.weight / 100.).label('weight')]).select_from(big_table). \
where(
and_(
cond,
IndexComponent.indexCode == benchmark,
)
).distinct()
df = pd.read_sql(query, self.engine).drop_duplicates(["trade_date", "code"])
df["trade_date"] = pd.to_datetime(df["trade_date"])
return df
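# Note: this second fetch_data definition replaces the one defined earlier in this
# class (Python keeps only the last binding of a name in a class body); the only
# difference is that the earlier version passed used_factor_tables=factor_tables
# through to fetch_factor.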
def fetch_data(self,
ref_date: str,
factors: Iterable[str],
codes: Iterable[int],
benchmark: int = None,
risk_model: str = 'short',
industry: str = 'sw') -> Dict[str, pd.DataFrame]:
total_data = dict()
transformer = Transformer(factors)
factor_data = self.fetch_factor(ref_date,
transformer,
codes)
if benchmark:
benchmark_data = self.fetch_benchmark(ref_date, benchmark)
total_data['benchmark'] = benchmark_data
factor_data = pd.merge(factor_data, benchmark_data, how='left', on=['code'])
factor_data['weight'] = factor_data['weight'].fillna(0.)
if risk_model:
excluded = list(set(total_risk_factors).intersection(transformer.dependency))
risk_cov, risk_exp = self.fetch_risk_model(ref_date, codes, risk_model, excluded)
factor_data = pd.merge(factor_data, risk_exp, how='left', on=['code'])
total_data['risk_cov'] = risk_cov
industry_info = self.fetch_industry(ref_date=ref_date,
codes=codes,
category=industry)
factor_data = pd.merge(factor_data, industry_info, on=['code'])
total_data['factor'] = factor_data
return total_data
def fetch_data_range(self,
universe: Universe,
factors: Iterable[str],
start_date: str = None,
end_date: str = None,
dates: Iterable[str] = None,
benchmark: int = None,
risk_model: str = 'short',
industry: str = 'sw',
external_data: pd.DataFrame = None) -> Dict[str, pd.DataFrame]:
total_data = {}
transformer = Transformer(factors)
factor_data = self.fetch_factor_range(universe,
transformer,
start_date,
end_date,
dates,
external_data=external_data)
if benchmark:
benchmark_data = self.fetch_benchmark_range(benchmark, start_date, end_date, dates)
total_data['benchmark'] = benchmark_data
factor_data = pd.merge(factor_data, benchmark_data, how='left',
on=['trade_date', 'code'])
factor_data['weight'] = factor_data['weight'].fillna(0.)
if risk_model:
excluded = list(set(total_risk_factors).intersection(transformer.dependency))
risk_cov, risk_exp = self.fetch_risk_model_range(universe, start_date, end_date, dates,
risk_model,
excluded)
factor_data = pd.merge(factor_data, risk_exp, how='left', on=['trade_date', 'code'])
total_data['risk_cov'] = risk_cov
industry_info = self.fetch_industry_range(universe,
start_date=start_date,
end_date=end_date,
dates=dates,
category=industry)
factor_data = pd.merge(factor_data, industry_info, on=['trade_date', 'code'])
total_data['factor'] = factor_data
return total_data
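# Manual smoke test: running this module directly exercises most of the fetch_* API
# against the MySQL instance configured in db_url below, so it requires network
# access to that database and the tables it queries.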
if __name__ == "__main__":
from PyFin.api import makeSchedule
# db_url = "mysql+mysqldb://reader:Reader#[email protected]:13317/vision?charset=utf8"
db_url = "mysql+mysqldb://dxrw:[email protected]:13317/dxtest?charset=utf8"
sql_engine = SqlEngine(db_url=db_url, factor_tables=["factor_momentum"])
universe = Universe("hs300")
start_date = '2020-01-02'
end_date = '2020-02-21'
frequency = "10b"
benchmark = 300
factors = ["EMA5D", "EMV6D"]
ref_dates = makeSchedule(start_date, end_date, frequency, 'china.sse')
print(ref_dates)
df = sql_engine.fetch_factor("2020-02-21", factors=factors, codes=["2010031963"])
print(df)
df = sql_engine.fetch_factor_range(universe=universe, dates=ref_dates, factors=factors)
print(df)
df = sql_engine.fetch_codes_range(start_date=start_date, end_date=end_date, universe=Universe("hs300"))
print(df)
df = sql_engine.fetch_dx_return("2020-10-09", codes=["2010031963"], benchmark=benchmark)
print(df)
df = sql_engine.fetch_dx_return_range(universe, dates=ref_dates, horizon=9, offset=1, benchmark=benchmark)
print(df)
df = sql_engine.fetch_dx_return_index("2020-10-09", index_code=benchmark)
print(df)
df = sql_engine.fetch_dx_return_index_range(start_date=start_date, end_date=end_date, index_code=benchmark, horizon=9, offset=1)
print(df)
df = sql_engine.fetch_benchmark("2020-10-09", benchmark=benchmark)
print(df)
df = sql_engine.fetch_benchmark_range(start_date=start_date, end_date=end_date, benchmark=benchmark)
print(df)
df = sql_engine.fetch_industry(ref_date="2020-10-09", codes=["2010031963"])
print(df)
df = sql_engine.fetch_industry_matrix(ref_date="2020-10-09", codes=["2010031963"])
print(df)
df = sql_engine.fetch_industry_matrix_range(universe=universe,
start_date=start_date,
end_date=end_date)
print(df)
df = sql_engine.fetch_industry_range(start_date=start_date, end_date=end_date, universe=Universe("hs300"))
print(df)
df = sql_engine.fetch_risk_model("2020-02-21", codes=["2010031963"])
print(df)
df = sql_engine.fetch_risk_model("2020-02-21", codes=["2010031963"], model_type="factor")
print(df)
df = sql_engine.fetch_risk_model_range(universe=universe,
start_date=start_date,
end_date=end_date)
print(df)
df = sql_engine.fetch_risk_model_range(universe=universe,
start_date=start_date,
end_date=end_date,
model_type="factor")
print(df)
df = sql_engine.fetch_data("2020-02-11", factors=factors, codes=["2010031963"], benchmark=300)
print(df)
df = sql_engine.fetch_data_range(universe,
factors=factors,
dates=ref_dates,
benchmark=benchmark)["factor"]
print(df)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/engines/sqlengine/mysql.py
|
mysql.py
|
from sqlalchemy import BigInteger, Column, DateTime, Float, Index, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Categories(Base):
__tablename__ = 'categories'
__table_args__ = (
Index('categories_pk', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(BigInteger, primary_key=True, nullable=False)
sw1 = Column(Integer)
sw1_adj = Column(Integer)
class FactorMaster(Base):
__tablename__ = 'factor_master'
__table_args__ = (
Index('factor_master_idx', 'factor', 'source', unique=True),
)
factor = Column(String(30), primary_key=True, nullable=False)
source = Column(String(30), primary_key=True, nullable=False)
alias = Column(String(50), nullable=False)
updateTime = Column(DateTime)
description = Column(Text)
class HaltList(Base):
__tablename__ = 'halt_list'
__table_args__ = (
Index('halt_list_Date_Code_haltBeginTime_uindex', 'trade_date', 'code', 'haltBeginTime',
unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
haltBeginTime = Column(DateTime, primary_key=True, nullable=False)
haltEndTime = Column(DateTime)
secShortName = Column(String(20))
exchangeCD = Column(String(4))
listStatusCD = Column(String(4))
delistDate = Column(DateTime)
assetClass = Column(String(4))
class IndexComponent(Base):
__tablename__ = 'index_components'
__table_args__ = (
Index('index_comp_idx', 'trade_date', 'indexCode', 'code', 'weight'),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
effDate = Column(DateTime)
indexShortName = Column(String(20))
indexCode = Column(Integer, primary_key=True, nullable=False)
secShortName = Column(String(20))
exchangeCD = Column(String(4))
weight = Column(Float(53))
class Industry(Base):
__tablename__ = 'industry'
__table_args__ = (
Index('industry_idx', 'trade_date', 'code', 'industryID', 'industryName1', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
industry = Column(String(30), nullable=False)
industryID = Column(BigInteger, primary_key=True, nullable=False)
industrySymbol = Column(String(20))
industryID1 = Column(BigInteger, nullable=False)
industryName1 = Column(String(50))
industryID2 = Column(BigInteger)
industryName2 = Column(String(50))
industryID3 = Column(BigInteger)
industryName3 = Column(String(50))
IndustryID4 = Column(BigInteger)
IndustryName4 = Column(String(50))
class Market(Base):
__tablename__ = 'market'
__table_args__ = (
Index('market_idx', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
secShortName = Column(String(10))
exchangeCD = Column(String(4))
preClosePrice = Column(Float(53))
actPreClosePrice = Column(Float(53))
openPrice = Column(Float(53))
highestPrice = Column(Float(53))
lowestPrice = Column(Float(53))
closePrice = Column(Float(53))
turnoverVol = Column(BigInteger)
turnoverValue = Column(Float(53))
dealAmount = Column(BigInteger)
turnoverRate = Column(Float(53))
accumAdjFactor = Column(Float(53))
negMarketValue = Column(Float(53))
marketValue = Column(Float(53))
chgPct = Column(Float(53))
PE = Column(Float(53))
PE1 = Column(Float(53))
PB = Column(Float(53))
isOpen = Column(Integer)
vwap = Column(Float(53))
class RiskCovDay(Base):
__tablename__ = 'risk_cov_day'
__table_args__ = (
Index('risk_cov_day_idx', 'trade_date', 'FactorID', 'Factor', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
FactorID = Column(Integer, nullable=False)
Factor = Column(String(50), primary_key=True, nullable=False)
BETA = Column(Float(53))
MOMENTUM = Column(Float(53))
SIZE = Column(Float(53))
EARNYILD = Column(Float(53))
RESVOL = Column(Float(53))
GROWTH = Column(Float(53))
BTOP = Column(Float(53))
LEVERAGE = Column(Float(53))
LIQUIDTY = Column(Float(53))
SIZENL = Column(Float(53))
Bank = Column(Float(53))
RealEstate = Column(Float(53))
Health = Column(Float(53))
Transportation = Column(Float(53))
Mining = Column(Float(53))
NonFerMetal = Column(Float(53))
HouseApp = Column(Float(53))
LeiService = Column(Float(53))
MachiEquip = Column(Float(53))
BuildDeco = Column(Float(53))
CommeTrade = Column(Float(53))
CONMAT = Column(Float(53))
Auto = Column(Float(53))
Textile = Column(Float(53))
FoodBever = Column(Float(53))
Electronics = Column(Float(53))
Computer = Column(Float(53))
LightIndus = Column(Float(53))
Utilities = Column(Float(53))
Telecom = Column(Float(53))
AgriForest = Column(Float(53))
CHEM = Column(Float(53))
Media = Column(Float(53))
IronSteel = Column(Float(53))
NonBankFinan = Column(Float(53))
ELECEQP = Column(Float(53))
AERODEF = Column(Float(53))
Conglomerates = Column(Float(53))
COUNTRY = Column(Float(53))
updateTime = Column(DateTime)
class RiskCovLong(Base):
__tablename__ = 'risk_cov_long'
__table_args__ = (
Index('risk_cov_long_Date_Factor_uindex', 'trade_date', 'Factor', unique=True),
Index('risk_cov_long_Date_FactorID_uindex', 'trade_date', 'FactorID', unique=True)
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
FactorID = Column(Integer)
Factor = Column(String(50), primary_key=True, nullable=False)
BETA = Column(Float(53))
MOMENTUM = Column(Float(53))
SIZE = Column(Float(53))
EARNYILD = Column(Float(53))
RESVOL = Column(Float(53))
GROWTH = Column(Float(53))
BTOP = Column(Float(53))
LEVERAGE = Column(Float(53))
LIQUIDTY = Column(Float(53))
SIZENL = Column(Float(53))
Bank = Column(Float(53))
RealEstate = Column(Float(53))
Health = Column(Float(53))
Transportation = Column(Float(53))
Mining = Column(Float(53))
NonFerMetal = Column(Float(53))
HouseApp = Column(Float(53))
LeiService = Column(Float(53))
MachiEquip = Column(Float(53))
BuildDeco = Column(Float(53))
CommeTrade = Column(Float(53))
CONMAT = Column(Float(53))
Auto = Column(Float(53))
Textile = Column(Float(53))
FoodBever = Column(Float(53))
Electronics = Column(Float(53))
Computer = Column(Float(53))
LightIndus = Column(Float(53))
Utilities = Column(Float(53))
Telecom = Column(Float(53))
AgriForest = Column(Float(53))
CHEM = Column(Float(53))
Media = Column(Float(53))
IronSteel = Column(Float(53))
NonBankFinan = Column(Float(53))
ELECEQP = Column(Float(53))
AERODEF = Column(Float(53))
Conglomerates = Column(Float(53))
COUNTRY = Column(Float(53))
updateTime = Column(DateTime)
class RiskCovShort(Base):
__tablename__ = 'risk_cov_short'
__table_args__ = (
Index('risk_cov_short_Date_FactorID_uindex', 'trade_date', 'FactorID', unique=True),
Index('risk_cov_short_Date_Factor_uindex', 'trade_date', 'Factor', unique=True)
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
FactorID = Column(Integer)
Factor = Column(String(50), primary_key=True, nullable=False)
BETA = Column(Float(53))
MOMENTUM = Column(Float(53))
SIZE = Column(Float(53))
EARNYILD = Column(Float(53))
RESVOL = Column(Float(53))
GROWTH = Column(Float(53))
BTOP = Column(Float(53))
LEVERAGE = Column(Float(53))
LIQUIDTY = Column(Float(53))
SIZENL = Column(Float(53))
Bank = Column(Float(53))
RealEstate = Column(Float(53))
Health = Column(Float(53))
Transportation = Column(Float(53))
Mining = Column(Float(53))
NonFerMetal = Column(Float(53))
HouseApp = Column(Float(53))
LeiService = Column(Float(53))
MachiEquip = Column(Float(53))
BuildDeco = Column(Float(53))
CommeTrade = Column(Float(53))
CONMAT = Column(Float(53))
Auto = Column(Float(53))
Textile = Column(Float(53))
FoodBever = Column(Float(53))
Electronics = Column(Float(53))
Computer = Column(Float(53))
LightIndus = Column(Float(53))
Utilities = Column(Float(53))
Telecom = Column(Float(53))
AgriForest = Column(Float(53))
CHEM = Column(Float(53))
Media = Column(Float(53))
IronSteel = Column(Float(53))
NonBankFinan = Column(Float(53))
ELECEQP = Column(Float(53))
AERODEF = Column(Float(53))
Conglomerates = Column(Float(53))
COUNTRY = Column(Float(53))
updateTime = Column(DateTime)
class RiskExposure(Base):
__tablename__ = 'risk_exposure'
__table_args__ = (
Index('risk_exposure_idx', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
BETA = Column(Float(53))
MOMENTUM = Column(Float(53))
SIZE = Column(Float(53))
EARNYILD = Column(Float(53))
RESVOL = Column(Float(53))
GROWTH = Column(Float(53))
BTOP = Column(Float(53))
LEVERAGE = Column(Float(53))
LIQUIDTY = Column(Float(53))
SIZENL = Column(Float(53))
Bank = Column(BigInteger)
RealEstate = Column(BigInteger)
Health = Column(BigInteger)
Transportation = Column(BigInteger)
Mining = Column(BigInteger)
NonFerMetal = Column(BigInteger)
HouseApp = Column(BigInteger)
LeiService = Column(BigInteger)
MachiEquip = Column(BigInteger)
BuildDeco = Column(BigInteger)
CommeTrade = Column(BigInteger)
CONMAT = Column(BigInteger)
Auto = Column(BigInteger)
Textile = Column(BigInteger)
FoodBever = Column(BigInteger)
Electronics = Column(BigInteger)
Computer = Column(BigInteger)
LightIndus = Column(BigInteger)
Utilities = Column(BigInteger)
Telecom = Column(BigInteger)
AgriForest = Column(BigInteger)
CHEM = Column(BigInteger)
Media = Column(BigInteger)
IronSteel = Column(BigInteger)
NonBankFinan = Column(BigInteger)
ELECEQP = Column(BigInteger)
AERODEF = Column(BigInteger)
Conglomerates = Column(BigInteger)
COUNTRY = Column(BigInteger)
class RiskMaster(Base):
__tablename__ = 'risk_master'
factor = Column(String(30), nullable=False, unique=True)
source = Column(String(30), nullable=False)
alias = Column(String(30), nullable=False)
type = Column(String(30))
updateTime = Column(DateTime)
description = Column(Text)
FactorID = Column(Integer, primary_key=True, unique=True)
vendor = Column(String(30))
class RiskReturn(Base):
__tablename__ = 'risk_return'
trade_date = Column(DateTime, primary_key=True, unique=True)
BETA = Column(Float(53))
MOMENTUM = Column(Float(53))
SIZE = Column(Float(53))
EARNYILD = Column(Float(53))
RESVOL = Column(Float(53))
GROWTH = Column(Float(53))
BTOP = Column(Float(53))
LEVERAGE = Column(Float(53))
LIQUIDTY = Column(Float(53))
SIZENL = Column(Float(53))
Bank = Column(Float(53))
RealEstate = Column(Float(53))
Health = Column(Float(53))
Transportation = Column(Float(53))
Mining = Column(Float(53))
NonFerMetal = Column(Float(53))
HouseApp = Column(Float(53))
LeiService = Column(Float(53))
MachiEquip = Column(Float(53))
BuildDeco = Column(Float(53))
CommeTrade = Column(Float(53))
CONMAT = Column(Float(53))
Auto = Column(Float(53))
Textile = Column(Float(53))
FoodBever = Column(Float(53))
Electronics = Column(Float(53))
Computer = Column(Float(53))
LightIndus = Column(Float(53))
Utilities = Column(Float(53))
Telecom = Column(Float(53))
AgriForest = Column(Float(53))
CHEM = Column(Float(53))
Media = Column(Float(53))
IronSteel = Column(Float(53))
NonBankFinan = Column(Float(53))
ELECEQP = Column(Float(53))
AERODEF = Column(Float(53))
Conglomerates = Column(Float(53))
COUNTRY = Column(Float(53))
updateTime = Column(DateTime)
class SecurityMaster(Base):
__tablename__ = 'security_master'
exchangeCD = Column(String(4))
ListSectorCD = Column(BigInteger)
ListSector = Column(String(6))
transCurrCD = Column(Text)
secShortName = Column(String(10))
secFullName = Column(Text)
listStatusCD = Column(String(2))
listDate = Column(DateTime)
delistDate = Column(DateTime)
equTypeCD = Column(String(4))
equType = Column(String(10))
exCountryCD = Column(String(3))
partyID = Column(BigInteger)
totalShares = Column(Float(53))
nonrestFloatShares = Column(Float(53))
nonrestfloatA = Column(Float(53))
officeAddr = Column(Text)
primeOperating = Column(Text)
endDate = Column(DateTime)
TShEquity = Column(Float(53))
code = Column(Integer, primary_key=True, unique=True)
class SpecificReturn(Base):
__tablename__ = 'specific_return'
__table_args__ = (
Index('specific_return_Date_Code_uindex', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
exchangeCD = Column(String(4))
secShortName = Column(String(20))
spret = Column(Float(53))
updateTime = Column(DateTime)
class SpecificRiskDay(Base):
__tablename__ = 'specific_risk_day'
__table_args__ = (
Index('specific_risk_day_Date_Code_uindex', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
exchangeCD = Column(String(4))
secShortName = Column(String(20))
SRISK = Column(Float(53))
updateTime = Column(DateTime)
class SpecificRiskLong(Base):
__tablename__ = 'specific_risk_long'
__table_args__ = (
Index('specific_risk_long_Date_Code_uindex', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
exchangeCD = Column(String(4))
secShortName = Column(String(20))
updateTime = Column(DateTime)
SRISK = Column(Float(53))
class SpecificRiskShort(Base):
__tablename__ = 'specific_risk_short'
__table_args__ = (
Index('specific_risk_short_Date_Code_uindex', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(Integer, primary_key=True, nullable=False)
exchangeCD = Column(String(4))
secShortName = Column(String(20))
SRISK = Column(Float(53))
updateTime = Column(DateTime)
class Universe(Base):
__tablename__ = 'universe'
__table_args__ = (
Index('universe_trade_date_code_uindex', 'trade_date', 'code', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
code = Column(BigInteger, primary_key=True, nullable=False)
aerodef = Column(Integer)
agriforest = Column(Integer)
auto = Column(Integer)
bank = Column(Integer)
builddeco = Column(Integer)
chem = Column(Integer)
conmat = Column(Integer)
commetrade = Column(Integer)
computer = Column(Integer)
conglomerates = Column(Integer)
eleceqp = Column(Integer)
electronics = Column(Integer)
foodbever = Column(Integer)
health = Column(Integer)
houseapp = Column(Integer)
ironsteel = Column(Integer)
leiservice = Column(Integer)
lightindus = Column(Integer)
machiequip = Column(Integer)
media = Column(Integer)
mining = Column(Integer)
nonbankfinan = Column(Integer)
nonfermetal = Column(Integer)
realestate = Column(Integer)
telecom = Column(Integer)
textile = Column(Integer)
transportation = Column(Integer)
utilities = Column(Integer)
ashare = Column(Integer)
ashare_ex = Column(Integer)
cyb = Column(Integer)
hs300 = Column(Integer)
sh50 = Column(Integer)
zxb = Column(Integer)
zz1000 = Column(Integer)
zz500 = Column(Integer)
zz800 = Column(Integer)
hs300_adj = Column(Integer)
zz500_adj = Column(Integer)
class IndexMarket(Base):
__tablename__ = 'index_market'
__table_args__ = (
Index('index_market_idx', 'trade_date', 'indexCode', unique=True),
)
trade_date = Column(DateTime, primary_key=True, nullable=False)
indexCode = Column(Integer, primary_key=True, nullable=False)
preCloseIndex = Column(Float(53))
openIndex = Column(Float(53))
highestIndex = Column(Float(53))
lowestIndex = Column(Float(53))
closeIndex = Column(Float(53))
turnoverVol = Column(Float(53))
turnoverValue = Column(Float(53))
chgPct = Column(Float(53))
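# Running this module as a script connects to the engine configured below and
# creates every table declared above via Base.metadata.create_all.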
if __name__ == '__main__':
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://alpha:[email protected]:8890/alpha')
Base.metadata.create_all(engine)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/dbmodel/models/postgres.py
|
postgres.py
|
from sqlalchemy import (
Column,
INT,
FLOAT,
Date,
Index,
Text,
text
)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class _StkDailyPricePro(Base):
__tablename__ = 'stk_daily_price_pro'
__table_args__ = (
Index('unique_stk_daily_price_pro_index', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
code = Column("security_code", Text)
chgPct = Column("change_pct", FLOAT)
secShortName = Column("name", Text)
is_valid = Column(INT, nullable=False)
flag = Column(INT)
is_verify = Column(INT)
class _IndexDailyPrice(Base):
__tablename__ = 'index_daily_price'
__table_args__ = (
Index('unique_index_daily_price_index', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
indexCode = Column("security_code", Text)
chgPct = Column("change_pct", FLOAT)
secShortName = Column("name", Text)
is_valid = Column(INT, nullable=False)
flag = Column(INT)
is_verify = Column(INT)
class _Index(Base):
__tablename__ = "index"
__table_args__ = (
Index('unique_index_index', 'trade_date', 'isymbol', 'symbol', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
indexSymbol = Column("isymbol", Text)
symbol = Column(Text)
weight = Column("weighing", FLOAT)
flag = Column(INT)
class _IndexComponent(Base):
__tablename__ = "index_component"
__table_args__ = (
Index('unique_index_index', 'trade_date', 'isecurity_code', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date)
indexSymbol = Column("isymbol", Text)
symbol = Column(Text)
indexCode = Column("isecurity_code", Text)
code = Column("security_code", Text)
flag = Column(INT)
class _StkUniverse(Base):
__tablename__ = 'stk_universe'
__table_args__ = (
Index('unique_stk_universe_index', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
aerodef = Column(INT, server_default=text("'0'"))
agriforest = Column(INT, server_default=text("'0'"))
auto = Column(INT, server_default=text("'0'"))
bank = Column(INT, server_default=text("'0'"))
builddeco = Column(INT, server_default=text("'0'"))
chem = Column(INT, server_default=text("'0'"))
conmat = Column(INT, server_default=text("'0'"))
commetrade = Column(INT, server_default=text("'0'"))
computer = Column(INT, server_default=text("'0'"))
conglomerates = Column(INT, server_default=text("'0'"))
eleceqp = Column(INT, server_default=text("'0'"))
electronics = Column(INT, server_default=text("'0'"))
foodbever = Column(INT, server_default=text("'0'"))
health = Column(INT, server_default=text("'0'"))
houseapp = Column(INT, server_default=text("'0'"))
ironsteel = Column(INT, server_default=text("'0'"))
leiservice = Column(INT, server_default=text("'0'"))
lightindus = Column(INT, server_default=text("'0'"))
machiequip = Column(INT, server_default=text("'0'"))
media = Column(INT, server_default=text("'0'"))
mining = Column(INT, server_default=text("'0'"))
nonbankfinan = Column(INT, server_default=text("'0'"))
nonfermetal = Column(INT, server_default=text("'0'"))
realestate = Column(INT, server_default=text("'0'"))
telecom = Column(INT, server_default=text("'0'"))
textile = Column(INT, server_default=text("'0'"))
transportation = Column(INT, server_default=text("'0'"))
utilities = Column(INT, server_default=text("'0'"))
ashare = Column(INT, server_default=text("'0'"))
ashare_ex = Column(INT, server_default=text("'0'"))
cyb = Column(INT, server_default=text("'0'"))
hs300 = Column(INT, server_default=text("'0'"))
sh50 = Column(INT, server_default=text("'0'"))
zxb = Column(INT, server_default=text("'0'"))
zz1000 = Column(INT, server_default=text("'0'"))
zz500 = Column(INT, server_default=text("'0'"))
zz800 = Column(INT, server_default=text("'0'"))
flag = Column(INT)
is_verify = Column(INT)
class _SwIndustryDaily(Base):
__tablename__ = 'sw_industry_daily'
__table_args__ = (
Index('sw_industry_daily_uindex', 'trade_date', 'industry_code1', 'symbol', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
symbol = Column(Text, nullable=False)
company_id = Column(Text, nullable=False)
code = Column("security_code", Text, nullable=False)
sname = Column(Text, nullable=False)
industry_code1 = Column(Text, nullable=False)
industry_name1 = Column(Text)
industry_code2 = Column(Text)
industry_name2 = Column(Text)
industry_code3 = Column(Text)
industry_name3 = Column(Text)
Industry_code4 = Column(Text)
Industry_name4 = Column(Text)
flag = Column(INT)
is_verify = Column(INT)
class _RiskExposure(Base):
__tablename__ = 'risk_exposure'
__table_args__ = (
Index('risk_exposure_idx', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(INT)
RealEstate = Column(INT)
Health = Column(INT)
Transportation = Column(INT)
Mining = Column(INT)
NonFerMetal = Column(INT)
HouseApp = Column(INT)
LeiService = Column(INT)
MachiEquip = Column(INT)
BuildDeco = Column(INT)
CommeTrade = Column(INT)
CONMAT = Column(INT)
Auto = Column(INT)
Textile = Column(INT)
FoodBever = Column(INT)
Electronics = Column(INT)
Computer = Column(INT)
LightIndus = Column(INT)
Utilities = Column(INT)
Telecom = Column(INT)
AgriForest = Column(INT)
CHEM = Column(INT)
Media = Column(INT)
IronSteel = Column(INT)
NonBankFinan = Column(INT)
ELECEQP = Column(INT)
AERODEF = Column(INT)
Conglomerates = Column(INT)
COUNTRY = Column(INT)
flag = Column(INT)
class _RiskCovDay(Base):
__tablename__ = 'risk_cov_day'
__table_args__ = (
Index('risk_cov_day_idx', 'trade_date', 'FactorID', 'Factor', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
FactorID = Column(INT, nullable=False)
Factor = Column(Text, nullable=False)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(FLOAT)
RealEstate = Column(FLOAT)
Health = Column(FLOAT)
Transportation = Column(FLOAT)
Mining = Column(FLOAT)
NonFerMetal = Column(FLOAT)
HouseApp = Column(FLOAT)
LeiService = Column(FLOAT)
MachiEquip = Column(FLOAT)
BuildDeco = Column(FLOAT)
CommeTrade = Column(FLOAT)
CONMAT = Column(FLOAT)
Auto = Column(FLOAT)
Textile = Column(FLOAT)
FoodBever = Column(FLOAT)
Electronics = Column(FLOAT)
Computer = Column(FLOAT)
LightIndus = Column(FLOAT)
Utilities = Column(FLOAT)
Telecom = Column(FLOAT)
AgriForest = Column(FLOAT)
CHEM = Column(FLOAT)
Media = Column(FLOAT)
IronSteel = Column(FLOAT)
NonBankFinan = Column(FLOAT)
ELECEQP = Column(FLOAT)
AERODEF = Column(FLOAT)
Conglomerates = Column(FLOAT)
COUNTRY = Column(FLOAT)
class _RiskCovLong(Base):
__tablename__ = 'risk_cov_long'
__table_args__ = (
Index('risk_cov_long_Date_Factor_uindex', 'trade_date', 'Factor', unique=True),
Index('risk_cov_long_Date_FactorID_uindex', 'trade_date', 'FactorID', unique=True)
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
FactorID = Column(INT)
Factor = Column(Text, nullable=False)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(FLOAT)
RealEstate = Column(FLOAT)
Health = Column(FLOAT)
Transportation = Column(FLOAT)
Mining = Column(FLOAT)
NonFerMetal = Column(FLOAT)
HouseApp = Column(FLOAT)
LeiService = Column(FLOAT)
MachiEquip = Column(FLOAT)
BuildDeco = Column(FLOAT)
CommeTrade = Column(FLOAT)
CONMAT = Column(FLOAT)
Auto = Column(FLOAT)
Textile = Column(FLOAT)
FoodBever = Column(FLOAT)
Electronics = Column(FLOAT)
Computer = Column(FLOAT)
LightIndus = Column(FLOAT)
Utilities = Column(FLOAT)
Telecom = Column(FLOAT)
AgriForest = Column(FLOAT)
CHEM = Column(FLOAT)
Media = Column(FLOAT)
IronSteel = Column(FLOAT)
NonBankFinan = Column(FLOAT)
ELECEQP = Column(FLOAT)
AERODEF = Column(FLOAT)
Conglomerates = Column(FLOAT)
COUNTRY = Column(FLOAT)
class _RiskCovShort(Base):
__tablename__ = 'risk_cov_short'
__table_args__ = (
Index('risk_cov_short_Date_FactorID_uindex', 'trade_date', 'FactorID', unique=True),
Index('risk_cov_short_Date_Factor_uindex', 'trade_date', 'Factor', unique=True)
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
FactorID = Column(INT)
Factor = Column(Text, nullable=False)
BETA = Column(FLOAT)
MOMENTUM = Column(FLOAT)
SIZE = Column(FLOAT)
EARNYILD = Column(FLOAT)
RESVOL = Column(FLOAT)
GROWTH = Column(FLOAT)
BTOP = Column(FLOAT)
LEVERAGE = Column(FLOAT)
LIQUIDTY = Column(FLOAT)
SIZENL = Column(FLOAT)
Bank = Column(FLOAT)
RealEstate = Column(FLOAT)
Health = Column(FLOAT)
Transportation = Column(FLOAT)
Mining = Column(FLOAT)
NonFerMetal = Column(FLOAT)
HouseApp = Column(FLOAT)
LeiService = Column(FLOAT)
MachiEquip = Column(FLOAT)
BuildDeco = Column(FLOAT)
CommeTrade = Column(FLOAT)
CONMAT = Column(FLOAT)
Auto = Column(FLOAT)
Textile = Column(FLOAT)
FoodBever = Column(FLOAT)
Electronics = Column(FLOAT)
Computer = Column(FLOAT)
LightIndus = Column(FLOAT)
Utilities = Column(FLOAT)
Telecom = Column(FLOAT)
AgriForest = Column(FLOAT)
CHEM = Column(FLOAT)
Media = Column(FLOAT)
IronSteel = Column(FLOAT)
NonBankFinan = Column(FLOAT)
ELECEQP = Column(FLOAT)
AERODEF = Column(FLOAT)
Conglomerates = Column(FLOAT)
COUNTRY = Column(FLOAT)
class _SpecificRiskDay(Base):
__tablename__ = 'specific_risk_day'
__table_args__ = (
Index('specific_risk_day_Date_Code_uindex', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
exchangeCD = Column(Text)
secShortName = Column(Text)
SRISK = Column(FLOAT)
class _SpecificRiskLong(Base):
__tablename__ = 'specific_risk_long'
__table_args__ = (
Index('specific_risk_long_Date_Code_uindex', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
exchangeCD = Column(Text)
secShortName = Column(Text)
SRISK = Column(FLOAT)
class _SpecificRiskShort(Base):
__tablename__ = 'specific_risk_short'
__table_args__ = (
Index('specific_risk_short_Date_Code_uindex', 'trade_date', 'security_code', unique=True),
)
id = Column(INT, primary_key=True)
trade_date = Column(Date, nullable=False)
code = Column("security_code", Text, nullable=False)
exchangeCD = Column(Text)
secShortName = Column(Text)
SRISK = Column(FLOAT)
# Factor tables
class _FactorMomentum(Base):
__tablename__ = 'factor_momentum'
__table_args__ = (
Index('factor_momentum_uindex', 'trade_date', 'security_code', 'flag', unique=True),
)
id = Column(INT, primary_key=True)
code = Column("security_code", Text, nullable=False)
trade_date = Column(Date, nullable=False)
ADX14D = Column(FLOAT)
ADXR14D = Column(FLOAT)
APBMA5D = Column(FLOAT)
ARC50D = Column(FLOAT)
BBI = Column(FLOAT)
BIAS10D = Column(FLOAT)
BIAS20D = Column(FLOAT)
BIAS5D = Column(FLOAT)
BIAS60D = Column(FLOAT)
CCI10D = Column(FLOAT)
CCI20D = Column(FLOAT)
CCI5D = Column(FLOAT)
CCI88D = Column(FLOAT)
ChgTo1MAvg = Column(FLOAT)
ChgTo1YAvg = Column(FLOAT)
ChgTo3MAvg = Column(FLOAT)
ChkOsci3D10D = Column(FLOAT)
ChkVol10D = Column(FLOAT)
DEA = Column(FLOAT)
EMA10D = Column(FLOAT)
EMA120D = Column(FLOAT)
EMA12D = Column(FLOAT)
EMA20D = Column(FLOAT)
EMA26D = Column(FLOAT)
EMA5D = Column(FLOAT)
EMA60D = Column(FLOAT)
EMV14D = Column(FLOAT)
EMV6D = Column(FLOAT)
Fiftytwoweekhigh = Column(FLOAT)
HT_TRENDLINE = Column(FLOAT)
KAMA10D = Column(FLOAT)
MA10Close = Column(FLOAT)
MA10D = Column(FLOAT)
MA10RegressCoeff12 = Column(FLOAT)
MA10RegressCoeff6 = Column(FLOAT)
MA120D = Column(FLOAT)
MA20D = Column(FLOAT)
MA5D = Column(FLOAT)
MA60D = Column(FLOAT)
MACD12D26D = Column(FLOAT)
MIDPOINT10D = Column(FLOAT)
MIDPRICE10D = Column(FLOAT)
MTM10D = Column(FLOAT)
PLRC12D = Column(FLOAT)
PLRC6D = Column(FLOAT)
PM10D = Column(FLOAT)
PM120D = Column(FLOAT)
PM20D = Column(FLOAT)
PM250D = Column(FLOAT)
PM5D = Column(FLOAT)
PM60D = Column(FLOAT)
PMDif5D20D = Column(FLOAT)
PMDif5D60D = Column(FLOAT)
RCI12D = Column(FLOAT)
RCI24D = Column(FLOAT)
SAR = Column(FLOAT)
SAREXT = Column(FLOAT)
SMA15D = Column(FLOAT)
TEMA10D = Column(FLOAT)
TEMA5D = Column(FLOAT)
TRIMA10D = Column(FLOAT)
TRIX10D = Column(FLOAT)
TRIX5D = Column(FLOAT)
UOS7D14D28D = Column(FLOAT)
WMA10D = Column(FLOAT)
flag = Column(INT)
class _FactorValuationEstimation(Base):
__tablename__ = 'factor_valuation_estimation'
id = Column(INT, primary_key=True)
code = Column("security_code", Text, nullable=False)
trade_date = Column(Date, nullable=False)
BMInduAvgOnSW1 = Column(FLOAT)
BMInduSTDOnSW1 = Column(FLOAT)
BookValueToIndu = Column(FLOAT)
CEToPTTM = Column(FLOAT)
DivYieldTTM = Column(FLOAT)
EPTTM = Column(FLOAT)
LogTotalAssets = Column(FLOAT)
LogofMktValue = Column(FLOAT)
LogofNegMktValue = Column(FLOAT)
MktValue = Column(FLOAT)
MrktCapToCorFreeCashFlow = Column(FLOAT)
OptIncToEnterpriseValueTTM = Column(FLOAT)
PBAvgOnSW1 = Column(FLOAT)
PBIndu = Column(FLOAT)
PBStdOnSW1 = Column(FLOAT)
PCFAvgOnSW1 = Column(FLOAT)
PCFIndu = Column(FLOAT)
PCFStdOnSW1 = Column(FLOAT)
PCFToNetCashflowTTM = Column(FLOAT)
PCFToOptCashflowTTM = Column(FLOAT)
PEAvgOnSW1 = Column(FLOAT)
PECutTTM = Column(FLOAT)
PEG3YTTM = Column(FLOAT)
PEG5YTTM = Column(FLOAT)
PEIndu = Column(FLOAT)
PEStdOnSW1 = Column(FLOAT)
PETTM = Column(FLOAT)
PEToAvg1M = Column(FLOAT)
PEToAvg1Y = Column(FLOAT)
PEToAvg3M = Column(FLOAT)
PEToAvg6M = Column(FLOAT)
PSAvgOnSW1 = Column(FLOAT)
PSIndu = Column(FLOAT)
PSStdOnSW1 = Column(FLOAT)
PSTTM = Column(FLOAT)
RevToMrktRatioTTM = Column(FLOAT)
TotalAssetsToEnterpriseValue = Column(FLOAT)
TotalMrktToEBIDAOnSW1 = Column(FLOAT)
TotalMrktToEBIDAOnSW1TTM = Column(FLOAT)
TotalMrktToEBIDATTM = Column(FLOAT)
TotalMrktToEBIDATTMRev = Column(FLOAT)
flag = Column(INT)
class _FactorVolatilityValue(Base):
__tablename__ = 'factor_volatility_value'
id = Column(INT, primary_key=True)
code = Column("security_code", Text, nullable=False)
trade_date = Column(Date, nullable=False)
Alpha120D = Column(FLOAT)
Alpha20D = Column(FLOAT)
Alpha60D = Column(FLOAT)
Beta120D = Column(FLOAT)
Beta20D = Column(FLOAT)
Beta252D = Column(FLOAT)
Beta60D = Column(FLOAT)
DDNCR12M = Column(FLOAT)
DDNSR12M = Column(FLOAT)
DVRAT = Column(FLOAT)
DailyReturnSTD252D = Column(FLOAT)
GainLossVarianceRatio120D = Column(FLOAT)
GainLossVarianceRatio20D = Column(FLOAT)
GainLossVarianceRatio60D = Column(FLOAT)
GainVariance120D = Column(FLOAT)
GainVariance20D = Column(FLOAT)
GainVariance60D = Column(FLOAT)
IR120D = Column(FLOAT)
IR20D = Column(FLOAT)
IR60D = Column(FLOAT)
Kurtosis120D = Column(FLOAT)
Kurtosis20D = Column(FLOAT)
Kurtosis60D = Column(FLOAT)
LossVariance120D = Column(FLOAT)
LossVariance20D = Column(FLOAT)
LossVariance60D = Column(FLOAT)
Sharpe120D = Column(FLOAT)
Sharpe20D = Column(FLOAT)
Sharpe60D = Column(FLOAT)
TreynorRatio120D = Column(FLOAT)
TreynorRatio20D = Column(FLOAT)
TreynorRatio60D = Column(FLOAT)
Variance120D = Column(FLOAT)
Variance20D = Column(FLOAT)
Variance60D = Column(FLOAT)
flag = Column(INT)
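# Public aliases: the underscore-prefixed classes above map the physical table and
# column names (e.g. security_code, change_pct, weighing) onto the attribute names
# used throughout the package (code, chgPct, weight, ...); the bindings below are
# presumably the names imported by the SQL engine layer.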
Market = _StkDailyPricePro
IndexMarket = _IndexDailyPrice
Universe = _StkUniverse
Industry = _SwIndustryDaily
RiskExposure = _RiskExposure
RiskCovDay = _RiskCovDay
RiskCovShort = _RiskCovShort
RiskCovLong = _RiskCovLong
SpecificRiskDay = _SpecificRiskDay
SpecificRiskShort = _SpecificRiskShort
SpecificRiskLong = _SpecificRiskLong
IndexComponent = _IndexComponent
IndexWeight = _Index
FactorMomentum = _FactorMomentum
FactorValuationEstimation = _FactorValuationEstimation
FactorVolatilityValue = _FactorVolatilityValue
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/data/dbmodel/models/mysql.py
|
mysql.py
|
from alphamind.benchmarks.data.neutralize import benchmark_neutralize
from alphamind.benchmarks.data.neutralize import benchmark_neutralize_with_groups
from alphamind.benchmarks.data.standardize import benchmark_standardize
from alphamind.benchmarks.data.standardize import benchmark_standardize_with_group
from alphamind.benchmarks.data.winsorize import benchmark_winsorize_normal
from alphamind.benchmarks.data.winsorize import benchmark_winsorize_normal_with_group
from alphamind.benchmarks.portfolio.linearbuild import benchmark_build_linear
from alphamind.benchmarks.portfolio.percentbuild import benchmark_build_percent
from alphamind.benchmarks.portfolio.percentbuild import benchmark_build_percent_with_group
from alphamind.benchmarks.portfolio.rankbuild import benchmark_build_rank
from alphamind.benchmarks.portfolio.rankbuild import benchmark_build_rank_with_group
from alphamind.benchmarks.settlement.simplesettle import benchmark_simple_settle
from alphamind.benchmarks.settlement.simplesettle import benchmark_simple_settle_with_group
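# Running this module executes the full benchmark suite: each benchmark_* call times
# the alphamind implementation against a straightforward reference implementation
# (numpy/pandas/scipy/sklearn/cvxopt) for the given problem size.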
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
benchmark_neutralize(30, 3, 50000)
benchmark_neutralize_with_groups(30, 3, 50000, 3)
benchmark_neutralize(50000, 50, 20)
benchmark_neutralize_with_groups(50000, 50, 20, 50)
benchmark_standardize(3000, 10, 1000)
benchmark_standardize_with_group(3000, 10, 1000, 30)
benchmark_standardize(100, 10, 50000)
benchmark_standardize_with_group(100, 10, 5000, 4)
benchmark_standardize(50000, 50, 20)
benchmark_standardize_with_group(50000, 50, 20, 50)
benchmark_winsorize_normal(3000, 10, 1000)
benchmark_winsorize_normal_with_group(3000, 10, 1000, 30)
benchmark_winsorize_normal(30, 10, 50000)
benchmark_winsorize_normal_with_group(30, 10, 5000, 5)
benchmark_winsorize_normal(50000, 50, 20)
benchmark_winsorize_normal_with_group(50000, 50, 20, 50)
benchmark_build_rank(3000, 1000, 300)
benchmark_build_rank_with_group(3000, 1000, 10, 30)
benchmark_build_rank(30, 50000, 3)
benchmark_build_rank_with_group(30, 50000, 1, 3)
benchmark_build_rank(50000, 20, 3000)
benchmark_build_rank_with_group(50000, 20, 10, 300)
benchmark_build_percent(3000, 1000, 0.1)
benchmark_build_percent_with_group(3000, 1000, 0.1, 30)
benchmark_build_percent(30, 50000, 0.1)
benchmark_build_percent_with_group(30, 50000, 0.1, 3)
benchmark_build_percent(50000, 20, 0.1)
benchmark_build_percent_with_group(50000, 20, 0.1, 300)
benchmark_build_linear(100, 3, 100)
benchmark_build_linear(1000, 30, 10)
benchmark_simple_settle(3000, 10, 1000)
benchmark_simple_settle_with_group(3000, 10, 1000, 30)
benchmark_simple_settle(30, 10, 50000)
benchmark_simple_settle_with_group(30, 10, 50000, 5)
benchmark_simple_settle(50000, 50, 20)
benchmark_simple_settle_with_group(50000, 50, 20, 50)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/benchmarks.py
|
benchmarks.py
|
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.settlement.simplesettle import simple_settle
def benchmark_simple_settle(n_samples: int, n_portfolios: int, n_loops: int) -> None:
print("-" * 60)
print("Starting simple settle benchmarking")
print("Parameters(n_samples: {0}, n_portfolios: {1}, n_loops: {2})".format(n_samples,
n_portfolios,
n_loops))
weights = np.random.randn(n_samples, n_portfolios)
ret_series = np.random.randn(n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
calc_ret = simple_settle(weights, ret_series)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
ret_series.shape = -1, 1
for _ in range(n_loops):
exp_ret = (weights * ret_series).sum(axis=0)
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_ret, exp_ret)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_simple_settle_with_group(n_samples: int, n_portfolios: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting simple settle with group-by values benchmarking")
print("Parameters(n_samples: {0}, n_portfolios: {1}, n_loops: {2}, n_groups: {3})".format(
n_samples, n_portfolios, n_loops, n_groups))
weights = np.random.randn(n_samples, n_portfolios)
ret_series = np.random.randn(n_samples)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
calc_ret = simple_settle(weights, ret_series, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
ret_series.shape = -1, 1
for _ in range(n_loops):
ret_mat = weights * ret_series
exp_ret = pd.DataFrame(ret_mat).groupby(groups).sum().values
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_ret, exp_ret)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_simple_settle(3000, 3, 1000)
benchmark_simple_settle_with_group(3000, 3, 1000, 30)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/settlement/simplesettle.py
|
simplesettle.py
|
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.data.winsorize import winsorize_normal
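# The reference implementation below clamps each column of x to mean +/- num_stds
# standard deviations (num_stds = 2 here); winsorize_normal is only timed against
# this reference, not asserted for equality.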
def benchmark_winsorize_normal(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting winsorize normal benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
num_stds = 2
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
_ = winsorize_normal(x, num_stds)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
def impl(x):
std_values = x.std(axis=0)
mean_value = x.mean(axis=0)
lower_bound = mean_value - num_stds * std_values
upper_bound = mean_value + num_stds * std_values
res = np.where(x > upper_bound, upper_bound, x)
res = np.where(res < lower_bound, lower_bound, res)
return res
start = dt.datetime.now()
for _ in range(n_loops):
_ = impl(x)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_winsorize_normal_with_group(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting winsorize normal with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
num_stds = 2
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = winsorize_normal(x, num_stds, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
def impl(x):
std_values = x.std(axis=0)
mean_value = x.mean(axis=0)
lower_bound = mean_value - num_stds * std_values
upper_bound = mean_value + num_stds * std_values
res = np.where(x > upper_bound, upper_bound, x)
res = np.where(res < lower_bound, lower_bound, res)
return res
start = dt.datetime.now()
for _ in range(n_loops):
_ = pd.DataFrame(x).groupby(groups).transform(impl)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_winsorize_normal(3000, 10, 1000)
benchmark_winsorize_normal_with_group(3000, 10, 1000, 30)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/data/winsorize.py
|
winsorize.py
|
import datetime as dt
import numpy as np
from sklearn.linear_model import LinearRegression
from alphamind.data.neutralize import neutralize
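# neutralize(x, y) is benchmarked against explicit OLS residuals: fit y on x without
# an intercept and keep y - x @ coef.T, i.e. the component of y orthogonal to the
# exposures in x; the grouped variant repeats the regression within each group label.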
def benchmark_neutralize(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting least square fitting benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
calc_res = neutralize(x, y)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
benchmark_model = LinearRegression(fit_intercept=False)
benchmark_model.fit(x, y)
exp_res = y - x @ benchmark_model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
np.testing.assert_array_almost_equal(calc_res, exp_res)
def benchmark_neutralize_with_groups(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting least square fitting with group benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = neutralize(x, y, groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
model = LinearRegression(fit_intercept=False)
for _ in range(n_loops):
for i in range(n_groups):
curr_x = x[groups == i]
curr_y = y[groups == i]
model.fit(curr_x, curr_y)
_ = curr_y - curr_x @ model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/data/neutralize.py
|
neutralize.py
|
import datetime as dt
import numpy as np
import pandas as pd
from scipy.stats import zscore
from alphamind.data.standardize import standardize
def benchmark_standardize(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting standardizing benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
_ = standardize(x)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
_ = zscore(x)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_standardize_with_group(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting standardizing with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = standardize(x, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
_ = pd.DataFrame(x).groupby(groups).transform(
lambda s: (s - s.mean(axis=0)) / s.std(axis=0))
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_standardize(3000, 10, 1000)
benchmark_standardize_with_group(3000, 10, 1000, 30)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/data/standardize.py
|
standardize.py
|
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.portfolio.percentbuilder import percent_build
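# Reference construction: for each of the 10 candidate portfolios, the top
# p_included fraction of rows by score (descending) gets a weight of 1 and the rest
# get 0; the grouped variant applies the same rule within every group label.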
def benchmark_build_percent(n_samples: int, n_loops: int, p_included: float) -> None:
print("-" * 60)
print("Starting portfolio construction by percent benchmarking")
print("Parameters(n_samples: {0}, p_included: {1}, n_loops: {2})".format(n_samples, p_included,
n_loops))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = percent_build(x, p_included)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
exp_weights = np.zeros((len(x), n_portfolio))
n_included = int(p_included * len(x))
chosen_index = (-x).argsort(axis=0).argsort(axis=0) < n_included
for j in range(n_portfolio):
exp_weights[chosen_index[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_build_percent_with_group(n_samples: int, n_loops: int, p_included: float,
n_groups: int) -> None:
print("-" * 60)
print("Starting portfolio construction by percent with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, p_included: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
p_included,
n_loops,
n_groups))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = percent_build(x, p_included, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
grouped_ordering = pd.DataFrame(-x).groupby(groups).rank()
grouped_count = pd.DataFrame(-x).groupby(groups).transform(lambda x: x.count())
exp_weights = np.zeros((len(x), n_portfolio))
n_included = (grouped_count * p_included).astype(int)
masks = (grouped_ordering <= n_included).values
for j in range(n_portfolio):
exp_weights[masks[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_build_percent(3000, 1000, 0.1)
benchmark_build_percent_with_group(3000, 1000, 0.1, 30)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/portfolio/percentbuild.py
|
percentbuild.py
|
import datetime as dt
import numpy as np
from cvxopt import matrix
from cvxopt import solvers
from scipy.optimize import linprog
from alphamind.portfolio.linearbuilder import linear_builder
solvers.options['show_progress'] = False
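# Compares the LP-based portfolio builder with scipy.optimize.linprog and cvxopt/GLPK
# on the same problem: maximize er @ w subject to sum(w) == 1, matching the
# benchmark's risk exposures (risk_exp.T @ w == risk_exp.T @ bm) and the per-asset
# bounds lbound <= w_i <= ubound; the three solutions are asserted to agree.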
def benchmark_build_linear(n_samples: int, n_risks: int, n_loop: int) -> None:
print("-" * 60)
print("Starting portfolio construction by linear programming")
print(
"Parameters(n_samples: {0}, n_risks: {1}, n_loop: {2})".format(n_samples, n_risks, n_loop))
er = np.random.randn(n_samples)
risk_exp = np.random.randn(n_samples, n_risks)
bm = np.random.rand(n_samples)
bm /= bm.sum()
lbound = -0.04
ubound = 0.05
risk_lbound = bm @ risk_exp
risk_ubound = bm @ risk_exp
start = dt.datetime.now()
for _ in range(n_loop):
status, v, x = linear_builder(er,
lbound,
ubound,
risk_exp,
risk_target=(risk_lbound,
risk_ubound))
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model (ECOS)', impl_model_time))
c = - er
bounds = [(lbound, ubound) for _ in range(n_samples)]
a_eq = np.ones((1, n_samples))
a_eq = np.vstack((a_eq, risk_exp.T))
b_eq = np.hstack((np.array([1.]), risk_exp.T @ bm))
start = dt.datetime.now()
for _ in range(n_loop):
res = linprog(c, A_eq=a_eq, b_eq=b_eq, bounds=bounds, options={'maxiter': 10000})
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model (scipy)', benchmark_model_time))
np.testing.assert_array_almost_equal(x, res['x'])
c = matrix(-er)
aneq = matrix(a_eq)
b = matrix(b_eq)
g = matrix(np.vstack((np.diag(np.ones(n_samples)), -np.diag(np.ones(n_samples)))))
h = matrix(np.hstack((ubound * np.ones(n_samples), -lbound * np.ones(n_samples))))
solvers.lp(c, g, h, solver='glpk')
start = dt.datetime.now()
for _ in range(n_loop):
res2 = solvers.lp(c, g, h, aneq, b, solver='glpk')
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model (glpk)', benchmark_model_time))
np.testing.assert_array_almost_equal(x, np.array(res2['x']).flatten())
if __name__ == '__main__':
benchmark_build_linear(2000, 30, 10)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/portfolio/linearbuild.py
|
linearbuild.py
|
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.portfolio.rankbuilder import rank_build
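# Reference construction: for each of the 10 candidate portfolios, the n_included
# highest-scoring rows get a weight of 1 and the rest get 0; the grouped variant
# picks the top n_included rows within every group label instead.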
def benchmark_build_rank(n_samples: int, n_loops: int, n_included: int) -> None:
print("-" * 60)
print("Starting portfolio construction by rank benchmarking")
print("Parameters(n_samples: {0}, n_included: {1}, n_loops: {2})".format(n_samples, n_included,
n_loops))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = rank_build(x, n_included)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
exp_weights = np.zeros((len(x), n_portfolio))
choosed_index = (-x).argsort(axis=0).argsort(axis=0) < n_included
for j in range(n_portfolio):
exp_weights[choosed_index[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_build_rank_with_group(n_samples: int, n_loops: int, n_included: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting portfolio construction by rank with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_included: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_included,
n_loops,
n_groups))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = rank_build(x, n_included, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
grouped_ordering = pd.DataFrame(-x).groupby(groups).rank()
exp_weights = np.zeros((len(x), n_portfolio))
masks = (grouped_ordering <= n_included).values
for j in range(n_portfolio):
exp_weights[masks[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_build_rank(3000, 1000, 300)
benchmark_build_rank_with_group(3000, 1000, 10, 30)
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/benchmarks/portfolio/rankbuild.py
|
rankbuild.py
|
import abc
from distutils.version import LooseVersion
import arrow
import numpy as np
import pandas as pd
from simpleutils.miscellaneous import list_eq
from sklearn import __version__ as sklearn_version
from xgboost import __version__ as xgbboot_version
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import decode
from alphamind.utilities import encode
class ModelBase(metaclass=abc.ABCMeta):
def __init__(self, features=None, fit_target=None):
if features is not None:
self.formulas = Transformer(features)
self.features = self.formulas.names
else:
self.features = None
if fit_target is not None:
self.fit_target = Transformer(fit_target)
else:
self.fit_target = None
self.impl = None
self.trained_time = None
def model_encode(self):
return encode(self.impl)
@classmethod
def model_decode(cls, model_desc):
return decode(model_desc)
def __eq__(self, rhs):
return self.model_encode() == rhs.model_encode() \
and self.trained_time == rhs.trained_time \
and list_eq(self.features, rhs.features) \
and encode(self.formulas) == encode(rhs.formulas) \
and encode(self.fit_target) == encode(rhs.fit_target)
def fit(self, x: pd.DataFrame, y: np.ndarray):
self.impl.fit(x[self.features].values, y.flatten())
self.trained_time = arrow.now().format("YYYY-MM-DD HH:mm:ss")
def predict(self, x: pd.DataFrame) -> np.ndarray:
return self.impl.predict(x[self.features].values)
def score(self, x: pd.DataFrame, y: np.ndarray) -> float:
return self.impl.score(x[self.features].values, y)
def ic(self, x: pd.DataFrame, y: np.ndarray) -> float:
predict_y = self.impl.predict(x[self.features].values)
return np.corrcoef(predict_y, y)[0, 1]
@abc.abstractmethod
def save(self) -> dict:
if self.__class__.__module__ == '__main__':
alpha_logger.warning(
"model is defined in a main module. The model_name may not be correct.")
model_desc = dict(model_name=self.__class__.__module__ + "." + self.__class__.__name__,
language='python',
saved_time=arrow.now().format("YYYY-MM-DD HH:mm:ss"),
features=list(self.features),
trained_time=self.trained_time,
desc=self.model_encode(),
formulas=encode(self.formulas),
fit_target=encode(self.fit_target),
internal_model=self.impl.__class__.__module__ + "." + self.impl.__class__.__name__)
return model_desc
@classmethod
@abc.abstractmethod
def load(cls, model_desc: dict):
obj_layout = cls()
obj_layout.features = model_desc['features']
obj_layout.formulas = decode(model_desc['formulas'])
obj_layout.trained_time = model_desc['trained_time']
obj_layout.impl = cls.model_decode(model_desc['desc'])
if 'fit_target' in model_desc:
obj_layout.fit_target = decode(model_desc['fit_target'])
else:
obj_layout.fit_target = None
return obj_layout
def create_model_base(party_name=None):
if not party_name:
return ModelBase
else:
class ExternalLibBase(ModelBase):
_lib_name = party_name
def save(self) -> dict:
model_desc = super().save()
if self._lib_name == 'sklearn':
model_desc[self._lib_name + "_version"] = sklearn_version
elif self._lib_name == 'xgboost':
model_desc[self._lib_name + "_version"] = xgbboot_version
else:
raise ValueError(
"3rd party lib name ({0}) is not recognized".format(self._lib_name))
return model_desc
@classmethod
def load(cls, model_desc: dict):
obj_layout = super().load(model_desc)
if cls._lib_name == 'sklearn':
current_version = sklearn_version
elif cls._lib_name == 'xgboost':
current_version = xgbboot_version
else:
raise ValueError(
"3rd party lib name ({0}) is not recognized".format(cls._lib_name))
if LooseVersion(current_version) < LooseVersion(
model_desc[cls._lib_name + "_version"]):
alpha_logger.warning(
'Current {2} version {0} is lower than the model version {1}. '
                            'Loaded model may work incorrectly.'.format(
                                current_version,
                                model_desc[cls._lib_name + "_version"],
                                cls._lib_name))
return obj_layout
return ExternalLibBase
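if __name__ == '__main__':
    # Illustrative usage sketch only (not part of the original module): how a
    # concrete model wraps create_model_base('sklearn') and round-trips through
    # save() / load(). See alphamind/model/linearmodel.py for the real
    # subclasses. The feature names 'f1' / 'f2' are made up; as in the
    # linearmodel.py demo, Transformer is assumed to accept bare names.
    from sklearn.linear_model import LinearRegression as _LinearRegressionImpl

    class _DemoModel(create_model_base('sklearn')):
        def __init__(self, features=None, fit_target=None, **kwargs):
            super().__init__(features=features, fit_target=fit_target)
            self.impl = _LinearRegressionImpl(**kwargs)

    model = _DemoModel(features=['f1', 'f2'])
    desc = model.save()               # embeds the current sklearn version
    restored = _DemoModel.load(desc)  # warns if the saved version is newer
    alpha_logger.info("restored features: {0}".format(restored.features))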
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/model/modelbase.py
|
modelbase.py
|
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(total_risk_factors).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
if neutralized_risk:
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_y = target_df.copy()
risk_exp = train_x[neutralized_risk].values.astype(float)
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
else:
risk_exp = None
train_x = factor_df.copy()
train_y = target_df.copy()
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
codes = train_x['code'].values
date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
dates = np.unique(date_label)
return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes
def prepare_data(engine: SqlEngine,
factors: Union[Transformer, Iterable[object]],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None):
if warm_start > 0:
p = Period(frequency)
p = Period(length=-warm_start * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')
dates = makeSchedule(start_date,
end_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Forward)
dates = [d.strftime('%Y-%m-%d') for d in dates]
horizon = map_freq(frequency)
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
factor_df = engine.fetch_factor_range(universe,
factors=transformer,
dates=dates).sort_values(['trade_date', 'code'])
alpha_logger.info("factor data loading finished")
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
alpha_logger.info("fit target data loading finished")
industry_df = engine.fetch_industry_range(universe, dates=dates)
alpha_logger.info("industry data loading finished")
benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
alpha_logger.info("benchmark data loading finished")
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
df = pd.merge(df, industry_df, on=['trade_date', 'code'])
df['weight'] = df['weight'].fillna(0.)
df.dropna(inplace=True)
return dates, df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code', 'weight', 'industry_code', 'industry'] + transformer.names]
def batch_processing(names,
x_values,
y_values,
groups,
group_label,
batch,
risk_exp,
pre_process,
post_process,
codes):
train_x_buckets = {}
train_y_buckets = {}
train_risk_buckets = {}
predict_x_buckets = {}
predict_y_buckets = {}
predict_risk_buckets = {}
predict_codes_bucket = {}
for i, start in enumerate(groups[:-batch]):
end = groups[i + batch]
left_index = bisect.bisect_left(group_label, start)
right_index = bisect.bisect_left(group_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
train_x_buckets[end] = pd.DataFrame(factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process),
columns=names)
train_y_buckets[end] = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
train_risk_buckets[end] = this_risk_exp
left_index = bisect.bisect_right(group_label, start)
right_index = bisect.bisect_right(group_label, end)
sub_dates = group_label[left_index:right_index]
this_raw_x = x_values[left_index:right_index]
this_codes = codes[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
predict_x_buckets[end] = pd.DataFrame(ne_x[inner_left_index:inner_right_index],
columns=names)
if risk_exp is not None:
predict_risk_buckets[end] = this_risk_exp[inner_left_index:inner_right_index]
else:
predict_risk_buckets = None
predict_codes_bucket[end] = this_codes[inner_left_index:inner_right_index]
this_raw_y = y_values[left_index:right_index]
if len(this_raw_y) > 0:
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
predict_y_buckets[end] = ne_y[inner_left_index:inner_right_index]
return train_x_buckets, \
train_y_buckets, \
train_risk_buckets, \
predict_x_buckets, \
predict_y_buckets, \
predict_risk_buckets, \
predict_codes_bucket
def fetch_data_package(engine: SqlEngine,
alpha_factors: Iterable[object],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
batch: int = 1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
fit_target: Union[Transformer, object] = None) -> dict:
alpha_logger.info("Starting data package fetching ...")
transformer = Transformer(alpha_factors)
names = transformer.names
dates, target_df, factor_df = prepare_data(engine,
transformer,
start_date,
end_date,
frequency,
universe,
benchmark,
warm_start + batch,
fit_target=fit_target)
target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes = \
_merge_df(engine, names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
alpha_logger.info("data merging finished")
target_df['weight'] = train_x['weight']
target_df['industry'] = train_x['industry']
target_df['industry_code'] = train_x['industry_code']
if neutralized_risk:
for i, name in enumerate(neutralized_risk):
target_df.loc[:, name] = risk_exp[:, i]
alpha_logger.info("Loading data is finished")
train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, predict_y_buckets, predict_risk_buckets, predict_codes_bucket \
= batch_processing(names,
x_values,
y_values,
dates,
date_label,
batch,
risk_exp,
pre_process,
post_process,
codes)
alpha_logger.info("Data processing is finished")
ret = dict()
ret['x_names'] = names
ret['settlement'] = target_df[target_df.trade_date >= start_date]
train_x_buckets = {k: train_x_buckets[k] for k in train_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_y_buckets = {k: train_y_buckets[k] for k in train_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_risk_buckets = {k: train_risk_buckets[k] for k in train_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_x_buckets = {k: predict_x_buckets[k] for k in predict_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_y_buckets = {k: predict_y_buckets[k] for k in predict_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
if neutralized_risk:
predict_risk_buckets = {k: predict_risk_buckets[k] for k in predict_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
else:
predict_risk_buckets = None
predict_codes_bucket = {k: predict_codes_bucket[k] for k in predict_codes_bucket if
k.strftime('%Y-%m-%d') >= start_date}
ret['train'] = {'x': train_x_buckets, 'y': train_y_buckets, 'risk': train_risk_buckets}
ret['predict'] = {'x': predict_x_buckets, 'y': predict_y_buckets, 'risk': predict_risk_buckets,
'code': predict_codes_bucket}
return ret
def fetch_train_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None) -> dict:
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
target_df, factor_df = df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code'] + transformer.names]
target_df, dates, date_label, risk_exp, x_values, y_values, _, _, codes = \
_merge_df(engine, transformer.names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
require(len(dates) >= 2, ValueError,
"No previous data for training for the date {0}".format(ref_date))
end = dates[-2]
start = dates[-batch - 1] if batch <= len(dates) - 1 else dates[0]
else:
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
index = (date_label >= start) & (date_label <= end)
this_raw_x = x_values[index]
this_raw_y = y_values[index]
this_code = codes[index]
if risk_exp is not None:
this_risk_exp = risk_exp[index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ret = dict()
ret['x_names'] = transformer.names
ret['train'] = {'x': pd.DataFrame(ne_x, columns=transformer.names), 'y': ne_y,
'code': this_code}
return ret
def fetch_predict_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fillna: str = None,
fit_target: Union[Transformer, object] = None):
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch - 1) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fillna:
factor_df = factor_df.groupby('trade_date').apply(
lambda x: x.fillna(x.median())).reset_index(
drop=True).dropna()
else:
factor_df = factor_df.dropna()
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
names = transformer.names
if neutralized_risk:
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(neutralized_risk).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_x = pd.merge(train_x, target_df, on=['trade_date', 'code'], how='left')
risk_exp = train_x[neutralized_risk].values.astype(float)
else:
train_x = pd.merge(factor_df, target_df, on=['trade_date', 'code'], how='left')
risk_exp = None
train_x.dropna(inplace=True, subset=train_x.columns[:-1])
x_values = train_x[names].values.astype(float)
y_values = train_x[['dx']].values.astype(float)
date_label = pd.DatetimeIndex(train_x.trade_date).to_pydatetime()
dates = np.unique(date_label)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
left_index = bisect.bisect_left(date_label, start)
right_index = bisect.bisect_right(date_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
sub_dates = date_label[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
ne_x = ne_x[inner_left_index:inner_right_index]
ne_y = ne_y[inner_left_index:inner_right_index]
left_index = bisect.bisect_left(date_label, end)
right_index = bisect.bisect_right(date_label, end)
codes = train_x.code.values[left_index:right_index]
else:
ne_x = None
ne_y = None
codes = None
ret = dict()
ret['x_names'] = transformer.names
ret['predict'] = {'x': pd.DataFrame(ne_x, columns=transformer.names, index=codes), 'code': codes,
                      'y': ne_y.flatten() if ne_y is not None else None}
return ret
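if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): a simplified,
    # self-contained equivalent of the bisect-based windowing used in
    # batch_processing / fetch_train_phase. Rows carry a sorted date label; the
    # training window covers the dates in [start, end) and the prediction
    # window covers the rows dated exactly `end`. The dates are synthetic.
    sample_label = np.array(sorted([dt.datetime(2020, 1, d) for d in (2, 3, 6, 7, 8)] * 3))
    sample_groups = np.unique(sample_label)
    batch = 2
    start, end = sample_groups[1], sample_groups[1 + batch]
    train_slice = slice(bisect.bisect_left(sample_label, start),
                        bisect.bisect_left(sample_label, end))
    predict_slice = slice(bisect.bisect_left(sample_label, end),
                          bisect.bisect_right(sample_label, end))
    print("train rows:  ", sample_label[train_slice])
    print("predict rows:", sample_label[predict_slice])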
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/model/data_preparing.py
|
data_preparing.py
|
import numpy as np
from simpleutils.asserts import require
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression as LinearRegressionImpl
from sklearn.linear_model import LogisticRegression as LogisticRegressionImpl
from alphamind.model.modelbase import create_model_base
class ConstLinearModelImpl(object):
def __init__(self, weights: np.ndarray = None):
self.weights = weights.flatten()
def fit(self, x: np.ndarray, y: np.ndarray):
raise NotImplementedError("Const linear model doesn't offer fit methodology")
def predict(self, x: np.ndarray):
return x @ self.weights
def score(self, x: np.ndarray, y: np.ndarray) -> float:
y_hat = self.predict(x)
y_bar = y.mean()
ssto = ((y - y_bar) ** 2).sum()
sse = ((y - y_hat) ** 2).sum()
return 1. - sse / ssto
class ConstLinearModel(create_model_base()):
def __init__(self,
features=None,
weights: dict = None,
fit_target=None):
super().__init__(features=features, fit_target=fit_target)
if features is not None and weights is not None:
require(len(features) == len(weights),
ValueError,
"length of features is not equal to length of weights")
if weights:
self.impl = ConstLinearModelImpl(np.array([weights[name] for name in self.features]))
def save(self):
model_desc = super().save()
model_desc['weight'] = list(self.impl.weights)
return model_desc
@classmethod
def load(cls, model_desc: dict):
return super().load(model_desc)
@property
def weights(self):
return self.impl.weights.tolist()
class LinearRegression(create_model_base('sklearn')):
def __init__(self, features=None, fit_intercept: bool = False, fit_target=None, **kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = LinearRegressionImpl(fit_intercept=fit_intercept, **kwargs)
def save(self) -> dict:
model_desc = super().save()
model_desc['weight'] = self.impl.coef_.tolist()
return model_desc
@property
def weights(self):
return self.impl.coef_.tolist()
class LassoRegression(create_model_base('sklearn')):
def __init__(self, alpha=0.01, features=None, fit_intercept: bool = False, fit_target=None,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = Lasso(alpha=alpha, fit_intercept=fit_intercept, **kwargs)
def save(self) -> dict:
model_desc = super().save()
model_desc['weight'] = self.impl.coef_.tolist()
return model_desc
@property
def weights(self):
return self.impl.coef_.tolist()
class LogisticRegression(create_model_base('sklearn')):
def __init__(self, features=None, fit_intercept: bool = False, fit_target=None, **kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = LogisticRegressionImpl(fit_intercept=fit_intercept, **kwargs)
def save(self) -> dict:
model_desc = super().save()
model_desc['weight'] = self.impl.coef_.tolist()
return model_desc
@property
def weights(self):
return self.impl.coef_.tolist()
if __name__ == '__main__':
import pprint
    ls = ConstLinearModel(features=['a', 'b'], weights={'a': 0.5, 'b': 0.5})
x = np.array([[0.2, 0.2],
[0.1, 0.1],
[0.3, 0.1]])
ls.predict(x)
desc = ls.save()
new_model = ConstLinearModel.load(desc)
pprint.pprint(new_model.save())
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/model/linearmodel.py
|
linearmodel.py
|
import arrow
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier as RandomForestClassifierImpl
from sklearn.ensemble import RandomForestRegressor as RandomForestRegressorImpl
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier as XGBClassifierImpl
from xgboost import XGBRegressor as XGBRegressorImpl
from alphamind.model.modelbase import create_model_base
class RandomForestRegressor(create_model_base('sklearn')):
def __init__(self,
n_estimators: int = 100,
max_features: str = 'auto',
features=None,
fit_target=None,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = RandomForestRegressorImpl(n_estimators=n_estimators,
max_features=max_features,
**kwargs)
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class RandomForestClassifier(create_model_base('sklearn')):
def __init__(self,
n_estimators: int = 100,
max_features: str = 'auto',
features=None,
fit_target=None,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = RandomForestClassifierImpl(n_estimators=n_estimators,
max_features=max_features,
**kwargs)
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class XGBRegressor(create_model_base('xgboost')):
def __init__(self,
n_estimators: int = 100,
learning_rate: float = 0.1,
max_depth: int = 3,
features=None,
fit_target=None,
n_jobs: int = 1,
missing: float = np.nan,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = XGBRegressorImpl(n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
n_jobs=n_jobs,
missing=missing,
**kwargs)
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class XGBClassifier(create_model_base('xgboost')):
def __init__(self,
n_estimators: int = 100,
learning_rate: float = 0.1,
max_depth: int = 3,
features=None,
fit_target=None,
n_jobs: int = 1,
missing: float = np.nan,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.impl = XGBClassifierImpl(n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
n_jobs=n_jobs,
missing=missing,
**kwargs)
self.impl = XGBClassifier.model_decode(self.model_encode())
@property
def importances(self):
return self.impl.feature_importances_.tolist()
class XGBTrainer(create_model_base('xgboost')):
def __init__(self,
objective='binary:logistic',
booster='gbtree',
tree_method='hist',
n_estimators: int = 100,
learning_rate: float = 0.1,
max_depth=3,
eval_sample=None,
early_stopping_rounds=None,
subsample=1.,
colsample_bytree=1.,
features=None,
fit_target=None,
random_state: int = 0,
n_jobs: int = 1,
**kwargs):
super().__init__(features=features, fit_target=fit_target)
self.params = {
'objective': objective,
'max_depth': max_depth,
'eta': learning_rate,
'booster': booster,
'tree_method': tree_method,
'subsample': subsample,
'colsample_bytree': colsample_bytree,
'nthread': n_jobs,
'seed': random_state
}
self.eval_sample = eval_sample
self.num_boost_round = n_estimators
self.early_stopping_rounds = early_stopping_rounds
self.impl = None
self.kwargs = kwargs
self.trained_time = None
def fit(self, x: pd.DataFrame, y: np.ndarray):
if self.eval_sample:
x_train, x_eval, y_train, y_eval = train_test_split(x[self.features].values,
y,
test_size=self.eval_sample,
random_state=42)
d_train = xgb.DMatrix(x_train, y_train)
d_eval = xgb.DMatrix(x_eval, y_eval)
self.impl = xgb.train(params=self.params,
dtrain=d_train,
num_boost_round=self.num_boost_round,
evals=[(d_eval, 'eval')],
verbose_eval=False,
**self.kwargs)
else:
d_train = xgb.DMatrix(x[self.features].values, y)
self.impl = xgb.train(params=self.params,
dtrain=d_train,
num_boost_round=self.num_boost_round,
**self.kwargs)
self.trained_time = arrow.now().format("YYYY-MM-DD HH:mm:ss")
def predict(self, x: pd.DataFrame) -> np.ndarray:
d_predict = xgb.DMatrix(x[self.features].values)
return self.impl.predict(d_predict)
@property
def importances(self):
imps = self.impl.get_fscore().items()
imps = sorted(imps, key=lambda x: x[0])
return list(zip(*imps))[1]
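if __name__ == '__main__':
    # Illustrative usage sketch only, on synthetic data: the feature names
    # 'f1'-'f3' are made up and, as elsewhere in this package, Transformer is
    # assumed to accept bare column names.
    np.random.seed(42)
    x = pd.DataFrame(np.random.randn(500, 3), columns=['f1', 'f2', 'f3'])
    y = np.random.randint(0, 2, 500)
    model = XGBTrainer(objective='binary:logistic',
                       n_estimators=20,
                       features=['f1', 'f2', 'f3'])
    model.fit(x, y)
    print(model.predict(x)[:5])
    print(model.importances)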
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/model/treemodel.py
|
treemodel.py
|
import bisect
import copy
from typing import Iterable
from typing import Tuple
import numpy as np
import pandas as pd
from simpleutils.miscellaneous import list_eq
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.universe import Universe
from alphamind.data.engines.universe import load_universe
from alphamind.data.rank import rank
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.model.data_preparing import fetch_predict_phase
from alphamind.model.data_preparing import fetch_train_phase
from alphamind.model.linearmodel import ConstLinearModel
from alphamind.model.loader import load_model
from alphamind.model.modelbase import ModelBase
PROCESS_MAPPING = {
'winsorize_normal': winsorize_normal,
'standardize': standardize,
'rank': rank,
}
def _map_process(processes):
if processes:
return [p if hasattr(p, '__call__') else PROCESS_MAPPING[p] for p in processes]
else:
return None
class DataMeta(object):
def __init__(self,
freq: str,
universe: Universe,
batch: int,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
data_source: str = None):
self.data_source = data_source
self.freq = freq
self.universe = universe
self.batch = batch
self.neutralized_risk = neutralized_risk
self.risk_model = risk_model
self.pre_process = _map_process(pre_process)
self.post_process = _map_process(post_process)
self.warm_start = warm_start
def __eq__(self, rhs):
return self.data_source == rhs.data_source \
and self.freq == rhs.freq \
and self.universe == rhs.universe \
and self.batch == rhs.batch \
and list_eq(self.neutralized_risk, rhs.neutralized_risk) \
and self.risk_model == rhs.risk_model \
and list_eq(self.pre_process, rhs.pre_process) \
and list_eq(self.post_process, rhs.post_process) \
and self.warm_start == rhs.warm_start
def save(self) -> dict:
return dict(
freq=self.freq,
universe=self.universe.save(),
batch=self.batch,
neutralized_risk=self.neutralized_risk,
risk_model=self.risk_model,
pre_process=[p.__name__ for p in self.pre_process] if self.pre_process else None,
            post_process=[p.__name__ for p in self.post_process] if self.post_process else None,
warm_start=self.warm_start,
data_source=self.data_source
)
@classmethod
def load(cls, data_desc: dict):
freq = data_desc['freq']
universe = load_universe(data_desc['universe'])
batch = data_desc['batch']
neutralized_risk = data_desc['neutralized_risk']
risk_model = data_desc['risk_model']
pre_process = data_desc['pre_process']
post_process = data_desc['post_process']
warm_start = data_desc['warm_start']
data_source = data_desc['data_source']
return cls(freq=freq,
universe=universe,
batch=batch,
neutralized_risk=neutralized_risk,
risk_model=risk_model,
pre_process=pre_process,
post_process=post_process,
warm_start=warm_start,
data_source=data_source)
def fetch_train_data(self,
ref_date,
alpha_model: ModelBase):
return fetch_train_phase(SqlEngine(self.data_source),
alpha_model.formulas,
ref_date,
self.freq,
self.universe,
self.batch,
self.neutralized_risk,
self.risk_model,
self.pre_process,
self.post_process,
self.warm_start,
fit_target=alpha_model.fit_target)
def fetch_predict_data(self,
ref_date: str,
alpha_model: ModelBase):
return fetch_predict_phase(SqlEngine(self.data_source),
alpha_model.formulas,
ref_date,
self.freq,
self.universe,
self.batch,
self.neutralized_risk,
self.risk_model,
self.pre_process,
self.post_process,
self.warm_start,
fillna=True,
fit_target=alpha_model.fit_target)
def train_model(ref_date: str,
alpha_model: ModelBase,
data_meta: DataMeta = None,
x_values: pd.DataFrame = None,
y_values: pd.DataFrame = None):
base_model = copy.deepcopy(alpha_model)
if not isinstance(alpha_model, ConstLinearModel):
if x_values is None:
train_data = data_meta.fetch_train_data(ref_date, alpha_model)
x_values = train_data['train']['x']
y_values = train_data['train']['y']
base_model.fit(x_values, y_values)
return base_model, x_values, y_values
def predict_by_model(ref_date: str,
alpha_model: ModelBase,
data_meta: DataMeta = None,
x_values: pd.DataFrame = None,
codes: Iterable[int] = None):
if x_values is None:
predict_data = data_meta.fetch_predict_data(ref_date, alpha_model)
codes, x_values = predict_data['predict']['code'], predict_data['predict']['x']
return pd.DataFrame(alpha_model.predict(x_values).flatten(), index=codes), x_values
class Composer:
def __init__(self,
alpha_model: ModelBase,
data_meta: DataMeta):
self.alpha_model = alpha_model
self.data_meta = data_meta
self.models = {}
self.is_updated = False
self.sorted_keys = None
def train(self, ref_date: str, x=None, y=None) -> Tuple[ModelBase, pd.DataFrame, pd.DataFrame]:
model, x, y = train_model(ref_date, self.alpha_model, self.data_meta, x, y)
self.models[ref_date] = model
self.is_updated = False
return model, x, y
def predict(self, ref_date: str, x: pd.DataFrame = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
model = self._fetch_latest_model(ref_date)
if x is None:
return predict_by_model(ref_date, model, self.data_meta)
else:
codes = x.index
return pd.DataFrame(model.predict(x).flatten(), index=codes), x
def score(self, ref_date: str, x: pd.DataFrame = None, y: np.ndarray = None,
d_type: str = 'test') \
-> Tuple[float, pd.DataFrame, pd.DataFrame]:
model = self._fetch_latest_model(ref_date)
if x is None or y is None:
if d_type == 'test':
test_data = self.data_meta.fetch_predict_data(ref_date, model)
x = test_data['predict']['x']
y = test_data['predict']['y']
else:
test_data = self.data_meta.fetch_train_data(ref_date, model)
x = test_data['train']['x']
y = test_data['train']['y']
return model.score(x, y), x, y
def ic(self, ref_date, x=None, y=None) -> Tuple[float, pd.DataFrame, pd.DataFrame]:
model = self._fetch_latest_model(ref_date)
if x is None or y is None:
test_data = self.data_meta.fetch_predict_data(ref_date, model)
x = test_data['predict']['x']
y = test_data['predict']['y']
return model.ic(x, y), x, y
def _fetch_latest_model(self, ref_date) -> ModelBase:
if self.is_updated:
sorted_keys = self.sorted_keys
else:
sorted_keys = sorted(self.models.keys())
self.sorted_keys = sorted_keys
self.is_updated = True
latest_index = bisect.bisect_left(sorted_keys, ref_date) - 1
return self.models[sorted_keys[latest_index]]
def __getitem__(self, ref_date) -> ModelBase:
return self.models[ref_date]
def save(self) -> dict:
return dict(
alpha_model=self.alpha_model.save(),
data_meta=self.data_meta.save()
)
@classmethod
def load(cls, comp_desc):
alpha_model = load_model(comp_desc['alpha_model'])
data_meta = DataMeta.load(comp_desc['data_meta'])
return cls(alpha_model, data_meta)
if __name__ == '__main__':
from alphamind.api import (industry_styles,
standardize,
winsorize_normal,
DataMeta,
LinearRegression,
fetch_data_package,
map_freq)
from PyFin.api import LAST, SHIFT
freq = '60b'
universe = Universe('custom', ['ashare_ex'])
batch = 1
neutralized_risk = industry_styles
risk_model = 'short'
pre_process = [winsorize_normal, standardize]
post_process = [standardize]
warm_start = 3
data_source = None
horizon = map_freq(freq)
engine = SqlEngine(data_source)
fit_intercept = True
kernal_feature = 'roe_q'
regress_features = {kernal_feature: LAST(kernal_feature),
kernal_feature + '_l1': SHIFT(kernal_feature, 1),
kernal_feature + '_l2': SHIFT(kernal_feature, 2),
kernal_feature + '_l3': SHIFT(kernal_feature, 3)
}
const_features = {kernal_feature: LAST(kernal_feature)}
fit_target = [kernal_feature]
data_meta = DataMeta(freq=freq,
universe=universe,
batch=batch,
neutralized_risk=neutralized_risk,
risk_model=risk_model,
pre_process=pre_process,
post_process=post_process,
warm_start=warm_start,
data_source=data_source)
alpha_model = LinearRegression(features=regress_features, fit_intercept=True,
fit_target=fit_target)
composer = Composer(alpha_model=alpha_model, data_meta=data_meta)
start_date = '2014-01-01'
end_date = '2016-01-01'
regression_model = LinearRegression(features=regress_features, fit_intercept=fit_intercept,
fit_target=fit_target)
regression_composer = Composer(alpha_model=regression_model, data_meta=data_meta)
data_package1 = fetch_data_package(engine,
alpha_factors=[kernal_feature],
start_date=start_date,
end_date=end_date,
frequency=freq,
universe=universe,
benchmark=906,
warm_start=warm_start,
batch=1,
neutralized_risk=neutralized_risk,
pre_process=pre_process,
post_process=post_process,
fit_target=fit_target)
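    # Illustrative continuation (assumption: a reachable alpha-mind database
    # behind SqlEngine(data_source)); left commented out because it cannot run
    # without one. It sketches the intended train / predict / ic workflow.
    # ref_date = '2015-01-05'
    # regression_composer.train(ref_date)
    # factor_values, x = regression_composer.predict(ref_date)
    # ic_value, _, _ = regression_composer.ic(ref_date)
    # print(factor_values.head(), ic_value)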
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/model/composer.py
|
composer.py
|
import numpy as np
from pfopt.linear import LpOptimizer as _LpOptimizer
from pfopt.linear import L1LpOptimizer as _L1LpOptimizer
from pfopt.quadratic import QOptimizer as _QOptimizer
from pfopt.quadratic import DecomposedQOptimizer as _DecomposedQOptimizer
from pfopt.quadratic import TargetVarianceOptimizer as _TargetVarianceOptimizer
class LPOptimizer:
def __init__(self,
objective: np.array,
cons_matrix: np.ndarray,
lbound: np.ndarray,
ubound: np.ndarray,
method: str = "deprecated"):
self._optimizer = _LpOptimizer(cost=objective,
cons_matrix=cons_matrix,
lower_bound=lbound,
upper_bound=ubound)
self._x, self._f_eval, self._status = self._optimizer.solve(solver="ECOS")
def status(self):
return self._status
def feval(self):
return self._f_eval
def x_value(self):
return self._x
class L1LPOptimizer:
def __init__(self,
objective: np.array,
cons_matrix: np.ndarray,
current_pos: np.ndarray,
target_turn_over: float,
lbound: np.ndarray,
ubound: np.ndarray):
self._optimizer = _L1LpOptimizer(cost=objective,
benchmark=current_pos,
l1norm=target_turn_over,
cons_matrix=cons_matrix,
lower_bound=lbound,
upper_bound=ubound)
self._x, self._f_eval, self._status = self._optimizer.solve()
def status(self):
return self._status
def feval(self):
return self._f_eval
def x_value(self):
return self._x
class QuadraticOptimizer:
def __init__(self,
objective: np.array,
cons_matrix: np.ndarray = None,
lbound: np.ndarray = None,
ubound: np.ndarray = None,
penalty: float = 1.,
cov: np.ndarray = None,
factor_cov: np.ndarray = None,
factor_load: np.ndarray = None,
factor_special: np.ndarray = None):
if cov is None and factor_cov is not None:
self._optimizer = _DecomposedQOptimizer(cost=objective,
factor_var=factor_cov,
factor_load=factor_load,
factor_special=factor_special,
penalty=penalty,
cons_matrix=cons_matrix,
lower_bound=lbound,
upper_bound=ubound)
elif cov is not None:
self._optimizer = _QOptimizer(cost=objective,
variance=cov,
penalty=penalty,
cons_matrix=cons_matrix,
lower_bound=lbound,
upper_bound=ubound)
else:
raise ValueError("cov and factor cov can't be all empty")
self._x, self._f_eval, self._status = self._optimizer.solve()
def status(self):
return self._status
def feval(self):
return self._f_eval
def x_value(self):
return self._x
class TargetVolOptimizer:
def __init__(self,
objective: np.array,
cons_matrix: np.ndarray = None,
lbound: np.ndarray = None,
ubound: np.ndarray = None,
target_vol: float = 1.,
cov: np.ndarray = None,
factor_cov: np.ndarray = None,
factor_load: np.ndarray = None,
factor_special: np.ndarray = None):
if cov is None and factor_cov is not None:
self._optimizer = _TargetVarianceOptimizer(cost=objective,
variance_target=target_vol*target_vol,
factor_var=factor_cov,
factor_load=factor_load,
factor_special=factor_special,
cons_matrix=cons_matrix,
lower_bound=lbound,
upper_bound=ubound)
elif cov is not None:
self._optimizer = _TargetVarianceOptimizer(cost=objective,
variance_target=target_vol*target_vol,
variance=cov,
cons_matrix=cons_matrix,
lower_bound=lbound,
upper_bound=ubound)
else:
raise ValueError("cov and factor cov can't be all empty")
self._x, self._f_eval, self._status = self._optimizer.solve()
def status(self):
return self._status
def feval(self):
return self._f_eval
def x_value(self):
return self._x
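if __name__ == '__main__':
    # Illustrative usage sketch only (requires the pfopt package imported
    # above). The constraint-matrix layout is inferred from the callers in
    # linearbuilder.py / meanvariancebuilder.py: each row is
    # [constraint coefficients ..., lower bound, upper bound].
    n = 4
    er = np.random.randn(n)
    budget_row = np.concatenate([np.ones((1, n)), np.array([[1., 1.]])], axis=1)
    lbound = np.zeros(n)
    ubound = 0.5 * np.ones(n)
    opt = LPOptimizer(-er, budget_row, lbound, ubound)
    print(opt.status(), opt.feval())
    print(opt.x_value())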
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/portfolio/optimizers.py
|
optimizers.py
|
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from alphamind.portfolio.optimizers import (
QuadraticOptimizer,
TargetVolOptimizer
)
from alphamind.exceptions.exceptions import PortfolioBuilderException
def _create_bounds(lbound,
ubound,
bm,
risk_exposure,
risk_target):
if lbound is not None:
lbound = lbound - bm
if ubound is not None:
ubound = ubound - bm
if risk_exposure is not None:
cons_mat = risk_exposure.T
bm_risk = cons_mat @ bm
clbound = (risk_target[0] - bm_risk).reshape((-1, 1))
cubound = (risk_target[1] - bm_risk).reshape((-1, 1))
else:
cons_mat = None
clbound = None
cubound = None
return lbound, ubound, cons_mat, clbound, cubound
def _create_result(optimizer, bm):
if optimizer.status() == "optimal" or optimizer.status() == "optimal_inaccurate":
return optimizer.status(), optimizer.feval(), optimizer.x_value() + bm
else:
raise PortfolioBuilderException(optimizer.status())
def mean_variance_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float, None],
ubound: Union[np.ndarray, float, None],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
lam: float = 1.,
linear_solver: str = 'deprecated') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = QuadraticOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
penalty=lam,
cov=cov,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
def target_vol_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float],
ubound: Union[np.ndarray, float],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
vol_target: float = 1.,
linear_solver: str = 'ma27') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = TargetVolOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
target_vol=vol_target,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk,
cov=cov)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
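if __name__ == '__main__':
    # Illustrative usage sketch only: a small mean-variance problem using the
    # full-covariance branch of the risk model and no extra linear constraints.
    # All numbers are synthetic.
    np.random.seed(7)
    n = 50
    er = np.random.randn(n)
    bm = np.ones(n) / n
    loading = np.random.randn(n, 3)
    cov = loading @ loading.T + np.diag(np.full(n, 0.05))
    risk_model = dict(cov=cov, idsync=None, factor_cov=None, factor_loading=None)
    status, f_eval, weights = mean_variance_builder(er,
                                                    risk_model,
                                                    bm,
                                                    lbound=0.,
                                                    ubound=0.1,
                                                    risk_exposure=None,
                                                    risk_target=None,
                                                    lam=1.)
    print(status, f_eval, weights.sum())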
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/portfolio/meanvariancebuilder.py
|
meanvariancebuilder.py
|
from typing import Tuple
from typing import Union
import numpy as np
from alphamind.portfolio.optimizers import LPOptimizer
from alphamind.portfolio.optimizers import L1LPOptimizer
from alphamind.exceptions.exceptions import PortfolioBuilderException
def linear_builder(er: np.ndarray,
lbound: Union[np.ndarray, float] = None,
ubound: Union[np.ndarray, float] = None,
risk_constraints: np.ndarray = None,
risk_target: Tuple[np.ndarray, np.ndarray] = None,
turn_over_target: float = None,
current_position: np.ndarray = None,
method: str = "deprecated") -> Tuple[str, np.ndarray, np.ndarray]:
er = er.flatten()
if risk_constraints is not None:
risk_lbound = risk_target[0].reshape((-1, 1))
risk_ubound = risk_target[1].reshape((-1, 1))
cons_matrix = np.concatenate((risk_constraints.T, risk_lbound, risk_ubound), axis=1)
else:
cons_matrix = None
if not turn_over_target or current_position is None:
prob = LPOptimizer(-er, cons_matrix, lbound, ubound)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value()
else:
raise PortfolioBuilderException(prob.status())
elif turn_over_target:
prob = L1LPOptimizer(objective=-er,
cons_matrix=cons_matrix,
current_pos=current_position,
target_turn_over=turn_over_target,
lbound=lbound,
ubound=ubound)
if prob.status() == 'optimal' or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value()
else:
raise PortfolioBuilderException(prob.status())
if __name__ == '__main__':
n = 5
lb = np.zeros(n)
ub = 4. / n * np.ones(n)
er = np.random.randn(n)
current_pos = np.random.randint(0, n, size=n)
current_pos = current_pos / current_pos.sum()
turn_over_target = 0.1
cons = np.ones((n, 1))
risk_lbound = np.ones(1)
risk_ubound = np.ones(1)
status, fvalue, x_values = linear_builder(er,
lb,
ub,
cons,
(risk_lbound, risk_ubound),
turn_over_target,
current_pos,
method='ecos')
print(status)
print(fvalue)
print(x_values)
print(current_pos)
print(np.abs(x_values - current_pos).sum())
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/portfolio/linearbuilder.py
|
linearbuilder.py
|
from enum import IntEnum
from math import inf
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from deprecated import deprecated
class BoundaryDirection(IntEnum):
LOWER = -1
UPPER = 1
class BoundaryType(IntEnum):
ABSOLUTE = 0
RELATIVE = 1
MAXABSREL = 2
MINABSREL = 3
class BoundaryImpl(object):
def __init__(self,
direction: BoundaryDirection,
b_type: BoundaryType,
val):
self.direction = direction
self.b_type = b_type
self.val = val
self._validation()
def _validation(self):
require(
self.b_type in [BoundaryType.ABSOLUTE, BoundaryType.RELATIVE, BoundaryType.MAXABSREL,
BoundaryType.MINABSREL],
ValueError,
"Boundary Type {0} is not recognized".format(self.b_type))
require(
self.direction == BoundaryDirection.LOWER or self.direction == BoundaryDirection.UPPER,
ValueError,
"Boundary direction {0} is not recognized".format(self.direction))
def __call__(self, center: float):
if self.b_type == BoundaryType.ABSOLUTE:
return self.val + center
elif self.b_type == BoundaryType.MAXABSREL:
abs_threshold = self.val[0]
rel_threshold = self.val[1]
if self.direction == BoundaryDirection.LOWER:
rel_bound = center - abs(center) * rel_threshold
abs_bound = center - abs_threshold
return min(rel_bound, abs_bound)
elif self.direction == BoundaryDirection.UPPER:
rel_bound = center + abs(center) * rel_threshold
abs_bound = center + abs_threshold
return max(rel_bound, abs_bound)
elif self.b_type == BoundaryType.MINABSREL:
abs_threshold = self.val[0]
rel_threshold = self.val[1]
if self.direction == BoundaryDirection.LOWER:
rel_bound = center - abs(center) * rel_threshold
abs_bound = center - abs_threshold
return max(rel_bound, abs_bound)
elif self.direction == BoundaryDirection.UPPER:
rel_bound = center + abs(center) * rel_threshold
abs_bound = center + abs_threshold
return min(rel_bound, abs_bound)
else:
require(center >= 0., ValueError,
"relative bounds only support positive back bone value")
return self.val * center
class BoxBoundary(object):
def __init__(self,
lower_bound: BoundaryImpl,
upper_bound: BoundaryImpl):
self.lower = lower_bound
self.upper = upper_bound
def bounds(self, center):
l_b, u_b = self.lower(center), self.upper(center)
        require(l_b <= u_b, ValueError, "lower bound should be lower than upper bound")
return l_b, u_b
def create_box_bounds(names: List[str],
b_type: Union[Iterable[BoundaryType], BoundaryType],
l_val: Union[Iterable[float], float],
u_val: Union[Iterable[float], float]) -> Dict[str, BoxBoundary]:
"""
helper function to quickly create a series of bounds
"""
bounds = dict()
if not hasattr(b_type, '__iter__'):
b_type = np.array([b_type] * len(names))
if not hasattr(l_val, '__iter__'):
l_val = np.array([l_val] * len(names))
if not hasattr(u_val, '__iter__'):
u_val = np.array([u_val] * len(names))
for i, name in enumerate(names):
lower = BoundaryImpl(BoundaryDirection.LOWER,
b_type[i],
l_val[i])
upper = BoundaryImpl(BoundaryDirection.UPPER,
b_type[i],
u_val[i])
bounds[name] = BoxBoundary(lower, upper)
return bounds
class LinearConstraints(object):
def __init__(self,
bounds: Dict[str, BoxBoundary],
cons_mat: pd.DataFrame,
backbone: np.ndarray = None):
self.names = list(set(bounds.keys()).intersection(set(cons_mat.columns)))
self.bounds = bounds
self.cons_mat = cons_mat
self.backbone = backbone
require(cons_mat.shape[0] == len(backbone) if backbone is not None else True,
"length of back bond should be same as number of rows of cons_mat")
def risk_targets(self) -> Tuple[np.ndarray, np.ndarray]:
lower_bounds = []
upper_bounds = []
if self.backbone is None:
backbone = np.zeros(len(self.cons_mat))
else:
backbone = self.backbone
for name in self.names:
center = backbone @ self.cons_mat[name].values
l, u = self.bounds[name].bounds(center)
lower_bounds.append(l)
upper_bounds.append(u)
return np.array(lower_bounds), np.array(upper_bounds)
@property
def risk_exp(self) -> np.ndarray:
return self.cons_mat[self.names].values
@deprecated(
reason="Constraints is deprecated in alpha-mind 0.1.1. Please use LinearConstraints instead.")
class Constraints(object):
def __init__(self,
risk_exp: Optional[np.ndarray] = None,
risk_names: Optional[np.ndarray] = None):
self.risk_exp = risk_exp
if risk_names is not None:
self.risk_names = np.array(risk_names)
else:
self.risk_names = np.array([])
n = len(self.risk_names)
self.risk_maps = dict(zip(self.risk_names, range(n)))
self.lower_bounds = -inf * np.ones(n)
self.upper_bounds = inf * np.ones(n)
def set_constraints(self, tag: str, lower_bound: float, upper_bound: float):
index = self.risk_maps[tag]
self.lower_bounds[index] = lower_bound
self.upper_bounds[index] = upper_bound
def add_exposure(self, tags: np.ndarray, new_exp: np.ndarray):
if len(tags) != new_exp.shape[1]:
raise ValueError(
                'new tags length {0} is not compatible with exposure shape {1}'.format(
                    len(tags), new_exp.shape))
for tag in tags:
if tag in self.risk_maps:
raise ValueError('tag {0} is already in risk table'.format(tag))
self.risk_names = np.concatenate((self.risk_names, tags))
if self.risk_exp is not None:
self.risk_exp = np.concatenate((self.risk_exp, new_exp), axis=1)
else:
self.risk_exp = new_exp
n = len(self.risk_names)
self.risk_maps = dict(zip(self.risk_names, range(n)))
self.lower_bounds = np.concatenate((self.lower_bounds, -inf * np.ones(len(tags))))
self.upper_bounds = np.concatenate((self.upper_bounds, inf * np.ones(len(tags))))
def risk_targets(self) -> Tuple[np.ndarray, np.ndarray]:
return self.lower_bounds, self.upper_bounds
if __name__ == '__main__':
risk_exp = np.array([[1.0, 2.0],
[3.0, 4.0]])
risk_names = np.array(['a', 'b'])
cons = Constraints(risk_exp, risk_names)
cons.set_constraints('b', 0.0, 0.1)
print(cons.risk_targets())
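    # Illustrative addition: the same exposure matrix expressed through the
    # non-deprecated LinearConstraints / create_box_bounds path, with absolute
    # boundaries of +/- 0.1 around a 50/50 backbone.
    cons_df = pd.DataFrame(risk_exp, columns=risk_names)
    bounds = create_box_bounds(['a', 'b'],
                               b_type=BoundaryType.ABSOLUTE,
                               l_val=-0.1,
                               u_val=0.1)
    lin_cons = LinearConstraints(bounds, cons_df, backbone=np.array([0.5, 0.5]))
    print(lin_cons.risk_targets())
    print(lin_cons.risk_exp)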
|
Alpha-Mind
|
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/portfolio/constraints.py
|
constraints.py
|
import pandas as pd
import numpy as np
class Factor_get_method(object):
def __init__(self) -> None:
pass
def get_all_tables(self,con):
sql = "select name from sqlite_master where type ='table' order by name"
c = con.cursor()
result = c.execute(sql)
factorfilelist = [i[0] for i in result.fetchall()]
return factorfilelist
def sql_fetch(self,con,tablename):
cursorObj = con.cursor()
cursorObj.execute('PRAGMA table_info("{}")'.format(tablename))
return cursorObj.fetchall()
def sql_exec(self,sql,sqlcols,conn):
cur = conn.cursor()
result = cur.execute(sql)
result = pd.DataFrame(result,columns = sqlcols).set_index(['date','symbol'])
return result
def get_prev_days_factor_by_name(self,factorname:str,date:str,conn):
sql = "select * from {} where {}.date >= '{}'".format(factorname,factorname,date)
sqlcols = [txt[1] for txt in self.sql_fetch(conn,factorname)]
return self.sql_exec(sql,sqlcols,conn)
def get_selected_date_factor_by_name(self,factorname:str,date:str,conn):
sql = "select * from {} where {}.date = '{}'".format(factorname,factorname,date)
sqlcols = [txt[1] for txt in self.sql_fetch(conn,factorname)]
return self.sql_exec(sql,sqlcols,conn)
def mmt_intraday_M(tempClose,tempOpen):
    # 1-month intraday momentum
mmt_intraday_M = (tempClose/tempOpen - 1).iloc[-22:].cumsum()
mmt_intraday_M = pd.DataFrame(mmt_intraday_M.iloc[-1:].stack(),columns = ['mmt_intraday_M'])
return mmt_intraday_M
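# Illustrative note (not part of the original module): the factor helpers below
# expect "wide" frames -- rows indexed by trading date, one column per symbol,
# with the column index named 'symbol' so that .stack() / .reset_index() yield
# a (date, symbol) index consistent with the ['date', 'symbol'] index used in
# Factor_get_method. A synthetic example, commented out because the data is
# made up:
#
#   dates = pd.date_range('2021-01-01', periods=30)
#   symbols = pd.Index(['000001.SZ', '000002.SZ'], name='symbol')
#   close = pd.DataFrame(10 + np.random.rand(30, 2), index=dates, columns=symbols)
#   open_ = close * (1 + 0.01 * np.random.randn(30, 2))
#   print(mmt_intraday_M(close, open_))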
# 1-month range-adjusted momentum
def mmt_range_M(tempHigh,tempLow,tempClose):
High_m = tempHigh.iloc[-22:].max()
Low_m = tempLow.iloc[-22:].min()
mmt_range_M = (High_m-Low_m)/tempClose.shift(22)
mmt_range_M = pd.DataFrame(mmt_range_M.iloc[-1:].stack(),columns = ['mmt_range_M'])
return mmt_range_M
def mmt_overnight_M(tempOpen,tempClose):
    # overnight momentum
mmt_overnight = tempOpen/tempClose.shift(1) - 1
todaydate = mmt_overnight.index[-1]
mmt_overnight_M = pd.DataFrame(mmt_overnight.iloc[-20:].sum(),columns = ['mmt_overnight_M'])
mmt_overnight_M['date'] = todaydate
mmt_overnight_M = mmt_overnight_M.reset_index().set_index(['date','symbol'])
return mmt_overnight_M
def mmt_route_M(tempClose):
    # path-adjusted momentum
mmt_route_M = (tempClose/tempClose.shift(20) - 1)/abs(tempClose/tempClose.shift(1)-1).rolling(20).sum()
mmt_route_M = pd.DataFrame(mmt_route_M.iloc[-1:].stack(),columns = ['mmt_route_M'])
return mmt_route_M
def mmt_discrete_M(tempClose):
    # information discreteness momentum
daily_up = (tempClose/tempClose.shift(1)-1).applymap(lambda x: int(x>0) if not np.isnan(x) else np.nan)
daily_down = (tempClose/tempClose.shift(1)-1).applymap(lambda x: int(x<0) if not np.isnan(x) else np.nan)
mmt_discrete_M = daily_up.rolling(20).sum()/20-daily_down.rolling(20).sum()/20
mmt_discrete_M = pd.DataFrame(mmt_discrete_M.iloc[-1:].stack(),columns = ['mmt_discrete_M'])
return mmt_discrete_M
def mmt_sec_rank_M(tempClose):
    # cross-sectional rank momentum
mmt_sec_rank_M = (tempClose/tempClose.shift(1)-1).rank(axis = 1).rolling(20).mean()
mmt_sec_rank_M = pd.DataFrame(mmt_sec_rank_M.iloc[-1:].stack(),columns = ['mmt_sec_rank_M'])
return mmt_sec_rank_M
def mmt_time_rank_M(anaual_close):
    # time-series rank score
# anaual_close = Close.iloc[-272:]
mmt_time_rank_M = (anaual_close/anaual_close.shift(1)-1).rolling(252,min_periods = 100).rank().rolling(20).mean()
mmt_time_rank_M = pd.DataFrame(mmt_time_rank_M.iloc[-1:].stack(),columns = ['mmt_time_rank_M'])
return mmt_time_rank_M
def mmt_highest_days_A(anaual_High):
    # number of days since the annual high
todaydate = anaual_High.index[-1]
mmt_highest_days_A = 252- anaual_High.iloc[-252:].apply(lambda x: x.argmax())
mmt_highest_days_A = pd.DataFrame(mmt_highest_days_A,columns= ['mmt_highest_days_A'])
mmt_highest_days_A['date'] = todaydate
mmt_highest_days_A = mmt_highest_days_A.reset_index().set_index(['date','symbol'])
return mmt_highest_days_A
def volumestable(volume):
    # volume stability
vol_m = volume.rolling(20).mean()
vol_std = volume.rolling(20).std()
volumestable = (vol_m/vol_std)
volumestable = pd.DataFrame(volumestable.iloc[-1:].stack(),columns = ['volumestable'])
return volumestable
def re_con(tempClose):
    # return consistency factor (numpy is already imported at module level)
d5_r = tempClose.pct_change(5).iloc[-1:]/5
d10_r = tempClose.pct_change(10).iloc[-1:]/10/np.sqrt(2)
d15_r = tempClose.pct_change(15).iloc[-1:]/15/np.sqrt(3)
con = pd.concat([d5_r.stack(),d10_r.stack(),d15_r.stack()],axis = 1).dropna()
con = con.mean(axis =1)/con.std(axis = 1)
con = con.unstack()
con_output = con.rank(axis = 1)
con_output = con_output.apply(lambda x: x-x.mean(),axis = 1).abs()
_con = pd.DataFrame(con_output.iloc[-1:].stack(),columns = ['_con'])
return _con
def bofu_money(tempHigh,tempLow,tempOpen,total_turnover):
    # price range / turnover
bofu_money = (tempHigh-tempLow)/tempOpen/total_turnover
bofu_money = pd.DataFrame(bofu_money.iloc[-1:].stack(),columns = ['bofu_money'])
return bofu_money
def vol_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = pd.DataFrame(ret.std(),columns = ['vol_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_up_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = (ret*(ret>0).astype(int)).replace(0,np.nan)
df = pd.DataFrame(df.std(),columns = ['vol_up_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_down_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = (ret*(ret<0).astype(int)).replace(0,np.nan)
df = pd.DataFrame(df.std(),columns = ['vol_down_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_highlow_avg(high,low,periodname,perioddays):
ratio = (high/low).iloc[-perioddays:]
todaydate = ratio.index[-1]
df = pd.DataFrame(ratio.mean(),columns = ['vol_highlow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_highlow_std(high,low,periodname,perioddays):
ratio = (high/low).iloc[-perioddays:]
todaydate = ratio.index[-1]
df = pd.DataFrame(ratio.std(),columns = ['vol_highlow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_updown_ratio(df,periodname,perioddays):
upstd = vol_up_std(df,periodname,perioddays)
downstd = vol_down_std(df,periodname,perioddays)
updownratio = pd.DataFrame(upstd['vol_up_std_'+periodname]/downstd['vol_down_std_'+periodname],columns = ['vol_updown_ratio_'+periodname])
return updownratio
def vol_highlow_stable(high,low,periodname,perioddays):
hlavg = vol_highlow_avg(high,low,periodname,perioddays)
hlstd = vol_highlow_std(high,low,periodname,perioddays)
hlstable = pd.DataFrame(hlavg['vol_highlow_avg_'+periodname]/hlstd['vol_highlow_std_'+periodname],columns = ['vol_highlow_stable_'+periodname])
return hlstable
def vol_upshadow_avg(High,Open,Close,periodname,perioddays):
multiper = (Open>Close).astype(int)
Open_Close_max = multiper*Open + (1-multiper)*Close
upshadow_df = ((High - Open_Close_max)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.mean(),columns = ['vol_upshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_upshadow_std(High,Open,Close,periodname,perioddays):
multiper = (Open>Close).astype(int)
Open_Close_max = multiper*Open + (1-multiper)*Close
upshadow_df = ((High - Open_Close_max)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.std(),columns = ['vol_upshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_upshadow_stable(High,Open,Close,periodname,perioddays):
avg = vol_upshadow_avg(High,Open,Close,periodname,perioddays)
std = vol_upshadow_std(High,Open,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_upshadow_avg_'+periodname]/std['vol_upshadow_std_'+periodname],columns = ['vol_upshadow_stable_'+periodname])
return df
def vol_downshadow_avg(Low,Open,Close,periodname,perioddays):
multiper = (Open<Close).astype(int)
Open_Close_min = multiper*Open + (1-multiper)*Close
downshadow_df = ((Open_Close_min - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.mean(),columns = ['vol_downshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_downshadow_std(Low,Open,Close,periodname,perioddays):
multiper = (Open<Close).astype(int)
Open_Close_min = multiper*Open + (1-multiper)*Close
downshadow_df = ((Open_Close_min - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.std(),columns = ['vol_downshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_downshadow_stable(Low,Open,Close,periodname,perioddays):
avg = vol_downshadow_avg(Low,Open,Close,periodname,perioddays)
std = vol_downshadow_std(Low,Open,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_downshadow_avg_'+periodname]/std['vol_downshadow_std_'+periodname],columns = ['vol_downshadow_stable_'+periodname])
return df
def vol_w_downshadow_avg(Low,Close,periodname,perioddays):
downshadow_df = ((Close - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.mean(),columns = ['vol_w_downshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_downshadow_std(Low,Close,periodname,perioddays):
downshadow_df = ((Close - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.std(),columns = ['vol_w_downshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_downshadow_stable(Low,Close,periodname,perioddays):
avg = vol_w_downshadow_avg(Low,Close,periodname,perioddays)
std = vol_w_downshadow_std(Low,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_w_downshadow_avg_'+periodname]/std['vol_w_downshadow_std_'+periodname],columns = ['vol_w_downshadow_stable_'+periodname])
return df
def vol_w_upshadow_avg(High,Close,periodname,perioddays):
upshadow_df = ((High - Close)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.mean(),columns = ['vol_w_upshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_upshadow_std(High,Close,periodname,perioddays):
upshadow_df = ((High - Close)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.std(),columns = ['vol_w_upshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_upshadow_stable(High,Close,periodname,perioddays):
avg = vol_w_upshadow_avg(High,Close,periodname,perioddays)
std = vol_w_upshadow_std(High,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_w_upshadow_avg_'+periodname]/std['vol_w_upshadow_std_'+periodname],columns = ['vol_w_upshadow_stable_'+periodname])
return df
def liq_turn_avg(tovr_r,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r_df.index[-1]
df = pd.DataFrame(tovr_r_df.mean(),columns = ['liq_turn_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_turn_std(tovr_r,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r_df.index[-1]
df = pd.DataFrame(tovr_r_df.std(),columns = ['liq_turn_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_vstd(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df = ret.iloc[-perioddays:]
df = pd.DataFrame(tovr_r_df.mean()/ret_df.std(),columns = ['liq_vstd_'+periodname])
todaydate = tovr_r_df.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_avg(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df_abs = ret.iloc[-perioddays:].abs()
amihud = ret_df_abs/tovr_r_df
df = pd.DataFrame(amihud.mean(),columns = ['liq_amihud_avg_'+periodname])
todaydate = amihud.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_std(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df_abs = ret.iloc[-perioddays:].abs()
amihud = ret_df_abs/tovr_r_df
df = pd.DataFrame(amihud.std(),columns = ['liq_amihud_std_'+periodname])
todaydate = amihud.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_stable(tovr_r,ret,periodname,perioddays):
avg = liq_amihud_avg(tovr_r,ret,periodname,perioddays)
std = liq_amihud_std(tovr_r,ret,periodname,perioddays)
v = avg['liq_amihud_avg_'+periodname]/std['liq_amihud_std_'+periodname]
df = pd.DataFrame(v,columns = ['liq_amihud_stable_'+periodname])
return df
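# --- Sketch of the Amihud-style illiquidity block above (illustrative only).
# Per day the ratio is |return| / turnover; the *_avg / *_std / *_stable variants
# are the window mean, standard deviation and mean/std of that ratio. Toy inputs
# below, same wide 'date' x 'symbol' layout as the other helpers.
def _demo_liq_amihud():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(1)
    dates = pd.date_range('2023-01-02', periods=40, freq='B', name='date')
    symbols = pd.Index(['AAA', 'BBB'], name='symbol')
    ret = pd.DataFrame(rng.normal(0.0, 0.02, (40, 2)), index=dates, columns=symbols)
    tovr = pd.DataFrame(rng.uniform(0.5, 5.0, (40, 2)), index=dates, columns=symbols)
    avg = liq_amihud_avg(tovr, ret, '1M', 30)
    # the same window mean computed by hand
    manual = (ret.iloc[-30:].abs() / tovr.iloc[-30:]).mean()
    stable = liq_amihud_stable(tovr, ret, '1M', 30)
    return avg, manual, stable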
def liq_shortcut_avg(tovr_r,High,Low,Open,Close,periodname,perioddays):
shortcut = 2*(High - Low) - (Open - Close).abs()
v = shortcut.iloc[-perioddays:]/tovr_r.iloc[-perioddays:]
df = pd.DataFrame(v.mean(),columns = ['liq_shortcut_avg_'+periodname])
todaydate = v.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_shortcut_std(tovr_r,High,Low,Open,Close,periodname,perioddays):
shortcut = 2*(High - Low) - (Open - Close).abs()
v = shortcut.iloc[-perioddays:]/tovr_r.iloc[-perioddays:]
df = pd.DataFrame(v.std(),columns = ['liq_shortcut_std_'+periodname])
todaydate = v.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_shortcut_stable(tovr_r,High,Low,Open,Close,periodname,perioddays):
avg = liq_shortcut_avg(tovr_r,High,Low,Open,Close,periodname,perioddays)
std = liq_shortcut_std(tovr_r,High,Low,Open,Close,periodname,perioddays)
v = avg['liq_shortcut_avg_'+periodname]/std['liq_shortcut_std_'+periodname]
df = pd.DataFrame(v,columns = ['liq_shortcut_stable_'+periodname])
return df
def PLUS(Close, High, Low):
plus = (2*Close - High - Low)/Close.shift(1)
todaydate = plus.index[-1]
df = pd.DataFrame(plus.iloc[-1])
df.columns = ['PLUS']
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_std_w_plus(Close, High, Low, tovr_r,periodname,perioddays):
plus = PLUS(Close, High, Low)
liq_std = liq_turn_std(tovr_r,periodname,perioddays)
plus['PLUS'] = plus['PLUS'].groupby(level = 'date', group_keys=False).apply(lambda x: x-min(0,x.min()))
swp = liq_std['liq_turn_std_'+periodname]*plus['PLUS']
df = pd.DataFrame(swp,columns = ['liq_std_w_plus_'+periodname])
return df
def tvr_std(tovr_r,periodname,perioddays):
df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r.index[-1]
fc = pd.DataFrame(df.std())
fc.columns = ['tvr_std_'+periodname]
fc['date'] = todaydate
fc = fc.reset_index().set_index(['date','symbol'])
return fc.sort_index()
def HL_Sprd(close,high,low,perioddays):
todaydate = close.index[-1]
sprd = (high/low - 1).iloc[-perioddays:]
close_ = close.iloc[-perioddays:]
phigh = close_.apply(lambda x: x>x.quantile(0.75)).astype(int).replace(0,np.nan)
plow = close_.apply(lambda x: x<x.quantile(0.25)).astype(int).replace(0,np.nan)
vhigh = pd.DataFrame((sprd*phigh).mean())
vlow = pd.DataFrame((sprd*plow).mean())
vlow['date'],vhigh['date'] = todaydate,todaydate
vhigh = vhigh.set_index('date',append=True).swaplevel()
vlow = vlow.set_index('date',append=True).swaplevel()
hlsprd = vhigh-vlow
hlsprd.columns = ['HL_Sprd']
return hlsprd.dropna()
def corr_price_turn(timerange,pricebyday,periodname):
price = pricebyday.loc[timerange]
fc = price[['close','turnover_ratio']].groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['close']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_price_turn_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def corr_ret_turn_post(timerange,pricebyday,periodname):
pricedf = pricebyday.loc[timerange]
pricedf['turnover_ratio'] = pricedf['turnover_ratio'].unstack().sort_index().shift(1).stack() # post
pricedf['ret'] = pricedf['close'].unstack().sort_index().pct_change().stack()
fc = pricedf[['ret','turnover_ratio']].groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['ret']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_ret_turn_post_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def corr_ret_turnd(timerange,pricebyday,periodname):
pricedf = pricebyday.loc[timerange]
pricedf['turnover_ratio_pct'] = pricedf['turnover_ratio'].unstack().sort_index().pct_change().stack()
pricedf['ret'] = pricedf['close'].unstack().sort_index().pct_change().stack()
fc = pricedf[['ret','turnover_ratio_pct']].dropna().groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['ret']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_ret_turnd_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def mts(sta_del_extrm,minbar,todaydate):
mts = sta_del_extrm[['single_trade_amt']]
mts['total_turnover'] = minbar['total_turnover']
mts = mts.groupby(level = 'symbol').corr()[::2]['total_turnover'].droplevel(1)
mts= pd.DataFrame(mts)
mts.columns = ['mts']
mts['date'] = todaydate
mts = mts.reset_index().set_index(['date','symbol'])
return mts
def mte(sta_del_extrm,minbar,todaydate):
mte = sta_del_extrm[['single_trade_amt']]
mte['close'] = minbar['close']
mte = mte.groupby(level = 'symbol').corr()[::2]['close'].droplevel(1)
mte= pd.DataFrame(mte)
mte.columns = ['mte']
mte['date'] = todaydate
mte = mte.reset_index().set_index(['date','symbol'])
return mte
def qua(sta_del_extrm,todaydate):
qua = sta_del_extrm.groupby(level = 'symbol').\
apply(lambda x: (x['single_trade_amt'].quantile(0.1)-\
x['single_trade_amt'].min())/(x['single_trade_amt'].max()-x['single_trade_amt'].min()))
qua = pd.DataFrame(qua,columns = ['qua'])
qua['date'] = todaydate
qua = qua.reset_index().set_index(['date','symbol'])
qua.index.names = ['date','symbol']
return qua
def skew(sta_50pct,todaydate):# skewness factor
skew = sta_50pct.groupby(level = 'symbol').\
apply(lambda x: (((x['single_trade_amt']-x['single_trade_amt'].mean())/x['single_trade_amt'].std())**3).mean())
skew = pd.DataFrame(skew,columns = ['skew'])
skew['date'] = todaydate
skew = skew.reset_index().set_index(['date','symbol'])
skew.index.names = ['date','symbol']
return skew
def s_reverse(sing_trade_amt,minbar,todaydate):# strong reversal factor
minute_r = sing_trade_amt.copy()
minute_r['minute_r'] = minbar['close']/minbar['open'] - 1
minute_r = minute_r.set_index('trading_date',append = True)
s_reverse = minute_r.groupby(level = 0).\
apply(lambda x: x[x.single_trade_amt > x.single_trade_amt.quantile(0.8)].minute_r.sum())
s_reverse = pd.DataFrame(s_reverse,columns = ['s_reverse'])
s_reverse['date'] = todaydate
s_reverse = s_reverse.reset_index().set_index(['date','symbol'])
s_reverse.index.names = ['date','symbol']
return s_reverse
def daily_sta_90pct(sta_del_extrm):# daily 90th percentile of single-trade amount
daily_sta = sta_del_extrm.set_index('trading_date',append = True).rename_axis(index = {'trading_date':'date'})
daily_sta_90pct = daily_sta.droplevel('datetime').groupby(level = 'symbol').apply(lambda x: x.groupby(level = 1).quantile(0.9)).reset_index().set_index(['date','symbol'])
daily_sta_90pct.columns = ['daily_sta_90pct']
return daily_sta_90pct
def ideal_reverse(daily_sta_cal,Close):
daily_sta_cal['day_return'] = Close.pct_change().stack()
by_stock = list(daily_sta_cal.groupby(level = 1))
def apply_rolling_cal(rollingdata):
if len(rollingdata.index)<20:
return
else:
temp = rollingdata.sort_values('daily_sta_90pct')
returndf = rollingdata.iloc[-1:].copy()
returndf['ideal_reverse'] = temp.iloc[:10].day_return.sum() - temp.iloc[10:].day_return.sum()
return returndf['ideal_reverse']
ideal_reverse = list(map(lambda x:apply_rolling_cal(x[1]),by_stock))
ideal_reverse = pd.concat(ideal_reverse)
ideal_reverse = pd.DataFrame(ideal_reverse)
ideal_reverse.columns = ['ideal_reverse']
return ideal_reverse
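# --- Hedged end-to-end sketch of the "ideal reverse" construction (illustrative only).
# For each stock, ideal_reverse ranks its history by daily_sta_90pct and returns the
# summed day returns of the 10 lowest-ranked days minus the summed returns of the rest.
# daily_sta_cal below is a hypothetical stand-in for the output of daily_sta_90pct().
def _demo_ideal_reverse():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(2)
    dates = pd.date_range('2023-01-02', periods=30, freq='B', name='date')
    symbols = pd.Index(['AAA', 'BBB'], name='symbol')
    Close = pd.DataFrame(100 * np.cumprod(1 + rng.normal(0.0, 0.01, (30, 2)), axis=0),
                         index=dates, columns=symbols)
    daily_sta_cal = pd.DataFrame(rng.uniform(1e4, 1e6, (30, 2)),
                                 index=dates, columns=symbols).stack().to_frame('daily_sta_90pct')
    return ideal_reverse(daily_sta_cal, Close)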
|
Alpha-Planet
|
/Alpha_Planet-0.0.2-py3-none-any.whl/Alpha_Rabbit/Factor_Def_and_Get_Method.py
|
Factor_Def_and_Get_Method.py
|
import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
from tqdm import *
class single_signal_test(object):
def __init__(self) -> None:
pass
def cal_turnover(self,df,ndays):
# holdings:
# pd.Series
# multiindex: timestamp,code
# all values are 1
holdings = df.copy()
holdings = holdings.unstack().dropna(how ='all',axis = 1)
holdings = holdings.apply(lambda x: x/x.sum(),axis = 1)
holdings = holdings.fillna(0)
return (holdings.diff(ndays).abs().sum(axis = 1)/2)
def cal_holdingnums(self,df):
# holdings:
# pd.Series
# multiindex: timestamp,code
# all values are 1
holdings = df.copy()
holdings = holdings.groupby(level = 0).sum()
return holdings
def one_factor_grouper(self,df,factorname,quantiles,qcut): # cross-sectional quantile grouping
# concatdf:pd.DataFrame
# factorname: str
# multiindex: timestamp,code
# columns: nday_return, factorname1, factorname2...
concatdf = df[[factorname]].copy().round(6)# round to 6 decimals before ranking!
concatdf[factorname+'_rank'] = concatdf[factorname].groupby(level = 'date', group_keys = False).rank()
if qcut:
concatdf[factorname+'_quantile'] =concatdf[factorname+'_rank'].dropna().groupby(level = 'date', group_keys = False).apply(lambda x: pd.qcut(x,quantiles,labels=list(range(1,quantiles+1)))).astype(int)
else:
concatdf[factorname+'_quantile'] =concatdf[factorname+'_rank'].dropna().groupby(level = 'date', group_keys = False).apply(lambda x: pd.cut(x,quantiles,labels=list(range(1,quantiles+1)))).astype(int)
return concatdf
def one_factor_return(self,df,factorname,ndays,return_col,w_method,demean = False): # per-quantile return calculation
if w_method =='average':
qreturn = df.groupby(level = 'date', group_keys = True).apply(lambda x: x.groupby(factorname+'_quantile')[[return_col]].mean()/ndays).unstack()
qreturn.columns = [i[1] for i in list(qreturn)]
if w_method == 'factor_weighted':
tmpdf = df.copy()
tmpdf['rwf'] = tmpdf[return_col]*tmpdf[factorname]
tmpdf.dropna(subset = ['rwf'],inplace = True)
qreturn = tmpdf.groupby(level = 'date', group_keys = True).\
apply(lambda x: x.groupby(factorname+'_quantile').\
apply(lambda x: x['rwf'].sum()/x[factorname].sum() if x[factorname].sum()>0 else 0)/ndays)
# qreturn = tmpdf.groupby(level = 'date', group_keys = True).\
# apply(lambda x: x.groupby(factorname+'_quantile').\
# apply(lambda x: (x[return_col]*x[factorname]).sum()/x[factorname].sum())/ndays)
if w_method =='cap_weighted':
qreturn = df.groupby(level = 'date', group_keys = True).\
apply(lambda x: x.groupby(factorname+'_quantile').\
apply(lambda x: (x[return_col]*x['cap']).sum()/x['cap'].sum())/ndays)
if len(qreturn.index.names)==1:
pass
else:
qreturn= qreturn.unstack().apply(lambda x: x.fillna(x.mean()),axis = 1)
if demean:
qreturn = qreturn.apply(lambda x: x-x.mean(),axis = 1)
return qreturn
def one_factor_icir(self,df,factorname,return_col):
from scipy import stats
ic = df.groupby(level = 'date').apply(lambda x: x[[return_col,factorname]].corr('spearman'))
ic_org = ic[ic.index.get_level_values(1) ==return_col][factorname].dropna()
ictable = ic_org.describe()[['count','mean','std','min','max']].copy()
ictable['risk_adj'] = ic_org.mean()/ic_org.std()
ictable['skew'] = ic_org.skew()
ictable['kurtosis'] = ic_org.kurtosis()
if ictable['mean'] <0:
ictable['p-value'] = stats.ttest_1samp(ic_org,0,alternative='less').pvalue
else:
ictable['p-value'] = stats.ttest_1samp(ic_org,0,alternative='greater').pvalue
return ictable
def one_factor_ret_sharp(self,qreturn,ret_freq):
return qreturn.mean()/qreturn.std()*np.sqrt(252/ret_freq)
def factor_prepare(self,allfactors,fc,quantiles,qcut):
test_fc = allfactors[[fc]].copy().rename_axis(['date','symbol'])
res_df = self.one_factor_grouper(test_fc,fc,quantiles,qcut)# ordinal (rank) standardization
return res_df
def factor_ret_test_sheet(self,
weight_method,
index_price, # benchmark price series; its dates are Timestamps
fcname,
res_df,
Price,
days,
savedir,
demean = False):
from alphalens import utils
plottools = plot_tools()
mate_al = mate_alphalens()
tov_df = res_df.groupby(by = fcname+'_quantile').apply(lambda x: self.cal_turnover(x[fcname+'_quantile']/x[fcname+'_quantile'],days))
if len(tov_df.index.names)==1:
to = tov_df.mean(axis =1)
else:
to = tov_df.unstack().mean(axis = 1)
clean_factor,price = mate_al.index_mate(res_df.dropna(),Price)
fwr = utils.compute_forward_returns(price.stack(),price)
clean_factor[str(days)+'D'] = fwr[str(days)+'D']
clean_factor = clean_factor.reset_index()
clean_factor['date'] = clean_factor['date'].astype(str)
clean_factor = clean_factor.set_index(['date','asset']).dropna()
if index_price is not None:
clean_factor = mate_al.trans_ex_return(clean_factor,index_price,ret_col=[str(days)+'D'])
else:
clean_factor[str(days)+'D'] = clean_factor.groupby(level = 'date',group_keys = False).apply(lambda x: x[str(days)+'D']-x[str(days)+'D'].mean())
qreturn = self.one_factor_return(clean_factor,fcname,days,str(days)+'D',w_method = weight_method,demean=demean)
# ic
ic_table = self.one_factor_icir(clean_factor,fcname,str(days)+'D')
indicators = self.judge_material(qreturn,ic_table,days)
plottools.factor_plt(qreturn,to,indicators,fcname,days,savedir)
return qreturn,clean_factor,indicators
def judge_material(self,qreturn,ic_table,days):
from scipy import stats
indicators = ic_table.copy()
maxquantile = max(qreturn.columns)
lsret = qreturn[maxquantile] - qreturn[1]
groupmean = qreturn.mean(axis = 0)
groupmean_diff = groupmean.diff().dropna()
top_half = groupmean_diff.iloc[-5:]
top_sharp = qreturn[maxquantile].mean()/qreturn[maxquantile].std()*pow(252/days,1/2)
t,p_lsret =stats.ttest_1samp(lsret,0,alternative='greater')
t,p_groupmean =stats.ttest_1samp(groupmean_diff,0,alternative='greater')
t,p_tophalfmean = stats.ttest_1samp(top_half,0,alternative='greater')
indicators['TopQtl_SR'] = top_sharp
indicators['LSRet_pvalue'] = p_lsret
indicators['MeanRetDiff_pvalue'] = p_groupmean
indicators['TophalfMeanRetDiff_pvalue'] = p_tophalfmean
return indicators
def efficient_judge(self,indicators):
from scipy import stats
# factor screening
'''
Test:
a factor that passes both checks is treated as useful
'''
if indicators['p-value']<= 0.05 and indicators['TopQtl_SR']>=1:
if indicators['TophalfMeanRetDiff_pvalue']<=0.3 and indicators['LSRet_pvalue']<=0.12:
# print(fc+'有用;头部{}组平均收益一阶差分p值{},多空收益p值{},ic_pvalue{},top组超额夏普{}'.format(int(maxquantile/2),p_top_halfmean,p_lsret))
return 1
# 且两个乘起来能跟原来的匹配;且另一个不能太差
# elif indicators['TophalfMeanRetDiff_pvalue']*indicators['LSRet_pvalue']<0.0025 and (indicators['TophalfMeanRetDiff_pvalue']/0.05 <= 0.1 or indicators['LSRet_pvalue']/0.05 <= 0.1) \
# and min(indicators['MeanRetDiff_pvalue'],indicators['TophalfMeanRetDiff_pvalue'])<=0.05 and indicators['TophalfMeanRetDiff_pvalue']<0.3:
# print(fc+'勉强有用;头部{}组平均收益一阶差分p值{},整体平均收益一阶差分p值{},多空收益p值{}'.format(int(maxquantile/2),p_top_halfmean,p_groupmean,p_lsret))
else:
return 2
return 0
def eff_classification(self,fc,indicator,judgefunc,strict_eff,unstrict_eff):
'''
Input:
factor matrix
Output:
1. factor test result
2. noise factors
'''
# factor screening
if judgefunc(indicator) == 1:
strict_eff.append(fc)
unstrict_eff.append(fc)
elif judgefunc(indicator) == 2:
unstrict_eff.append(fc)
return strict_eff,unstrict_eff
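# --- Hedged sketch of the single_signal_test API on synthetic data (illustrative only):
# cross-sectional quantile grouping plus the pass/fail rule in efficient_judge
# (IC p-value and top-quantile Sharpe first, then the monotonicity / long-short
# thresholds). The factor values and test statistics below are made up.
def _demo_single_signal_test():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(3)
    dates = pd.date_range('2023-01-02', periods=5, freq='B', name='date')
    symbols = pd.Index(['S%03d' % i for i in range(100)], name='symbol')
    panel = pd.DataFrame(rng.normal(size=(len(dates), len(symbols))),
                         index=dates, columns=symbols).stack().to_frame('myfactor')
    sst = single_signal_test()
    grouped = sst.one_factor_grouper(panel, 'myfactor', quantiles=10, qcut=True)
    # hypothetical statistics shaped like the output of judge_material()
    indicators = pd.Series({'p-value': 0.01, 'TopQtl_SR': 1.5,
                            'TophalfMeanRetDiff_pvalue': 0.10, 'LSRet_pvalue': 0.05})
    return grouped['myfactor_quantile'].head(), sst.efficient_judge(indicators)  # judge -> 1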
class multi_factor_test(object):
def __init__(self) -> None:
self.sst = single_signal_test()
pass
def factors_abnormal_ret(self,factordf,return_col,factorlist,days,index_price = None,pricedf = None,longshort_return = False):
df = factordf.copy()
if pricedf is not None:
# by default the forward return is tomorrow's close divided by today's close
df[str(days)+'D'] = pricedf.pct_change(days,fill_method = None).shift(-days).stack()
if index_price is not None:
ml = mate_alphalens()
df,pricedf = ml.index_mate(df,pricedf)
df = ml.trans_ex_return(df,index_price,str(days)+'D')
df = df.rename(columns = {str(days)+'D':return_col+str(days)+'D'}).dropna(subset = return_col+str(days)+'D')
if longshort_return == False:
ret_k = df.groupby(level = 'date',group_keys = False).apply(lambda x: sm.formula.ols(return_col+str(days)+'D'+'~'+'+'.join(factorlist),data = x).fit().params)
del ret_k['Intercept']
else :
lscol = list(factordf)
quantiles = int(df[return_col+str(days)+'D'].groupby(level = 'date').count().mean()//100)
LSretList = []
for col in tqdm(lscol):
tmpdf = df.copy()
tmpdf[col+'_quantile'] = self.sst.one_factor_grouper(df,col,quantiles,False)[col+'_quantile']
qreturn = self.sst.one_factor_return(tmpdf,col,days,return_col+str(days)+'D','factor_weighted',False) # use the forward-return column as renamed above
LSretList.append(qreturn[max(list(qreturn))] - qreturn[1])
ret_k = pd.concat(LSretList,axis = 1)
ret_k.columns = lscol
return ret_k
def multif_barra_norm(self,allfactors,Bft):
df = allfactors.copy()
print('Barra neutralizing....')
for fcname in tqdm(list(df)):
test_fc = df[[fcname]].copy().rename_axis(['date','symbol'])
residual_ols,params_ols = Bft.barra_compose(test_fc)
df[fcname] = residual_ols # replace the raw factor with its neutralized residual
return df
def multif_industry_norm(self,allfactors,industry_info):
df = allfactors.copy()
df['first_industry_name'] = industry_info
df = df.dropna(subset = 'first_industry_name').groupby(level = 'date',group_keys =False).apply(lambda x: x.groupby(by = 'first_industry_name',group_keys =False).apply(lambda x:x-x.mean(numeric_only=True)))
del df['first_industry_name']
return df
def multif_corr_ana(self,df,factornamelist): # multi-factor correlation analysis
# df:pd.DataFrame
# factornamelist: strlist
# multiindex: timestamp,code
# columns: nday_return, factorname1, factorname2...
df_ana = df[factornamelist].groupby(level = 'date').corr()
corr_mean = df_ana.groupby(level = 1).mean() # the second index level of the corr() result is unnamed, so it is addressed as level 1; that level holds the factor names
corr_ir = df_ana.groupby(level = 1).mean()/df_ana.groupby(level = 1).std()
return corr_mean.loc[list(corr_mean)],corr_ir.loc[list(corr_ir)]
def multif_pca_ana(self,originalFactor,domain_factor_nums): # multi-factor PCA analysis
# originalFactor: pd.DataFrame
# multiindex: timestamp,code
# columns: factorname1, factorname2...
from sklearn import preprocessing
data = originalFactor.groupby(level = 'date', group_keys = False).apply(lambda x: preprocessing.scale(x))
data = np.vstack(data.values)
from sklearn.decomposition import PCA
pcaModel = PCA(domain_factor_nums)
pcaModel.fit(data)
pcaFactors = pcaModel.transform(data)
pcaFactors = pd.DataFrame(pcaFactors)
pcaFactors.index = originalFactor.index
pcaFactors.columns = ['pca_'+str(i) for i in range(domain_factor_nums)]
return pcaModel.explained_variance_,pcaModel.explained_variance_ratio_,pcaFactors
def batch_factors_test(self,weight_method,allfactors,Price,quantiles,days,qcut,savedir,index_price = None,demean = False):
returndict = {}
sst = single_signal_test()
for fc in tqdm(list(allfactors)):
res_df = sst.factor_prepare(allfactors,fc,quantiles,qcut)
sst.factor_ret_test_sheet(weight_method,index_price,fc,res_df,Price,days,savedir,demean)
returndict[fc] = res_df[[fc]]
return returndict
def multif_tsstable_test(self,originalData):
# originalFactor: pd.DataFrame
# multiindex: timestamp,code
# columns: factorname1, factorname2...
from statsmodels.tsa.stattools import adfuller
data = originalData.copy()#.groupby(level = 0).apply(lambda x: (x-x.mean())/x.std()) -- do not standardize again!!
mean_pvalue = data.groupby(level = 'date').apply(lambda x:x.mean()).apply(lambda x: adfuller(x)[1])
std_pvalue = data.groupby(level = 'date').apply(lambda x:x.std()).apply(lambda x: adfuller(x)[1])
skew_pvalue = data.groupby(level = 'date').apply(lambda x:x.skew()).apply(lambda x: adfuller(x)[1])
kurt_pvalue = data.groupby(level = 'date').apply(lambda x:x.kurt()).apply(lambda x: adfuller(x)[1])
yarn_pvalue = pd.concat([mean_pvalue,std_pvalue,skew_pvalue,kurt_pvalue],axis = 1)
yarn_pvalue.columns = ['mean','std','skew','kurt']
return yarn_pvalue
def del_updown_limit(self,factordf,daybar,text):
# drop limit-up / limit-down observations
notuplimit = daybar[~(daybar[text] == daybar.limit_up)]
notdownlimit = daybar[~(daybar[text] == daybar.limit_down)]
factordf = factordf[factordf.index.isin(notuplimit.index)]
factordf = factordf[factordf.index.isin(notdownlimit.index)]
return factordf
def in_some_pool(self,df,pool_components):
factordf = df.copy()
factordf['inpool']=pool_components.applymap(lambda x:1)
factordf['inpool'] = factordf['inpool'].apply(lambda x: 1 if x>0 else 0)
testdf = factordf[factordf['inpool']>=1]
del testdf['inpool']
return testdf
def orthog(self,factor_mat, y, xlist):
df = factor_mat.replace([np.inf, -np.inf], np.nan).dropna()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
params = regre.params[~(regre.params.index == 'Intercept')]
intercept = regre.params[(regre.params.index == 'Intercept')]
residual = df[y] - (df[list(params.index)]*params).sum(axis = 1) - intercept.values
residual = pd.DataFrame(residual)
residual.columns = [y]
return self.mat_normlize(residual),params
def mat_orthog(self,factor_mat):
temp1 = factor_mat.replace([np.inf, -np.inf], np.nan).dropna()
for i in list(temp1):
no = list(temp1).index(i)
if no==0:
temp1[i] = self.mat_normlize(temp1[i])
continue
fclist = list(filter(lambda x: x!=i,list(temp1)[:no]))
temp1[i] = self.orthog(temp1,i,fclist)[0]
return temp1
def ts_mat_orthog(self,factor_mat):
return factor_mat.groupby(level = 'date',group_keys = False).apply(self.mat_orthog)
def mat_normlize(self,factor_mat):
df = factor_mat.rename_axis(['date','symbol']).replace([np.inf, -np.inf], np.nan)
def norm(x):
return (x - x.min())/(x.max()-x.min())
return df.groupby(level = 'date',group_keys = False).apply(norm)
def mat_ranknormlize(self,factor_mat):
df = factor_mat.rename_axis(['date','symbol']).replace([np.inf, -np.inf], np.nan)
def norm(x):
x_rank = x.rank()
return (x_rank - x_rank.min())/(x_rank.max()-x_rank.min())
return df.groupby(level = 'date',group_keys = False).apply(norm)
def multindex_shift(self,fcdf):
df = fcdf.reset_index()
datelist = list(df['date'].drop_duplicates())
datedict = dict(zip(datelist[:-1],datelist[1:]))
df['date'] =df['date'].apply(lambda x: datedict[x] if x in datedict.keys() else np.nan)
return df.dropna(subset = 'date').set_index(['date','symbol'])
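# --- Hedged sketch of the two cross-sectional scalers above (illustrative only):
# mat_normlize min-max rescales the raw values per date, while mat_ranknormlize
# min-max rescales the per-date ranks, so its output is insensitive to outliers.
def _demo_normalize():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(4)
    dates = pd.date_range('2023-01-02', periods=3, freq='B', name='date')
    symbols = pd.Index(['AAA', 'BBB', 'CCC', 'DDD'], name='symbol')
    panel = pd.DataFrame(rng.lognormal(size=(3, 4)),
                         index=dates, columns=symbols).stack().to_frame('f1')
    mft = multi_factor_test()
    return mft.mat_normlize(panel), mft.mat_ranknormlize(panel)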
class Barra_factor_ana(object):
'''
1. growth needs at least 504 days of data; stocks that fall short get dropped when the factors are concatenated together
2. barrafactor must have a two-level index: the first level is the date, the second is the instrument
'''
def __init__(self,df=None,start_date=None,end_date=None,dir=None,skip_fileload=None) -> None:
# preload data
if not skip_fileload:
self.price = df
dailyreturn = df/df.shift(1)-1
dailyreturn.dropna(how = 'all',inplace=True)
self.returndata = dailyreturn
self.start_date = start_date
self.end_date = end_date
import os
filelist = os.listdir(dir)
self.filedict = {}
for f in filelist:
if f[-3:]=='csv':
self.filedict[f[:-4]] = pd.read_csv(dir+f,index_col = [0,1])
pass
def rise_barra_factors(self):
print('rise size')
self.size = np.log(self.filedict['market_cap']).dropna()
def OLSparams(y,x):
print('rise beta')
X_ = x.droplevel('order_book_id')
df = y.copy()
df['market_r'] = X_['r']
df.dropna(subset = 'market_r',inplace = True)
dflist = list(df.rolling(252))[252:]
paramslist = []
for olsdf in dflist:
mod = sm.OLS(olsdf,sm.add_constant(olsdf['market_r']))
re = mod.fit()
params = re.params.T
params.index = olsdf.columns
params = params[params.index!='market_r']
params['date'] = olsdf.index[-1]
params = params.rename(columns = {'market_r':'beta'})
paramslist.append(params)
olsparams = pd.concat(paramslist).set_index('date',append=True).unstack().T
constdf = olsparams.loc['const'].ewm(halflife = 63,ignore_na = True,adjust = False).mean().stack()
betadf = olsparams.loc['beta'].ewm(halflife = 63,ignore_na = True,adjust = False).mean().stack()
# cal residual
mkt_df = pd.concat([X_['r']]*len(list(betadf.unstack())),axis = 1)
mkt_df.columns = list(betadf.unstack())
residual = y - betadf.unstack()*mkt_df - constdf.unstack() # this residual already uses the EWM-smoothed beta and const, so no further EWM is needed
return {'beta':betadf,'const':constdf,'residual':residual}
def MOMTM(y):
df = np.log(1+y)
momtm = df.ewm(halflife=126,ignore_na = True,adjust = False).mean()#.iloc[-1:]
return momtm
def CMRA(y,T):
date = y.index[-1]
dflist= []
for i in range(1,T+1):
pct_n_month = pd.DataFrame((y/y.shift(21*i)-1).iloc[-1])/21
dflist.append(pct_n_month)
df = pd.concat(dflist,axis =1)
zmax = df.max(axis =1)
zmin = df.min(axis = 1)
cmra = pd.DataFrame(np.log(1+zmax)-np.log(1+zmin),columns = [date]).T
return cmra
def orthog(barrafactor,y,xlist):
df = barrafactor.copy()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
for p in xlist:
df[p]*= regre.params[p]
df[y+'_orth'] = df[y] - df[xlist].sum(axis = 1)-regre.params['Intercept']
return df[[y+'_orth']]
# beta
self.olsparams = OLSparams(self.returndata,self.filedict['market_r'])
self.beta = pd.DataFrame(self.olsparams['beta']).dropna()
self.beta.columns = ['beta']
# momentum
print('rise momentum')
# retroll504 = list(self.returndata.rolling(504))[504:]
# self.momtm = pd.concat(list(map(lambda x: MOMTM(x),retroll504))).shift(21).dropna(how = 'all')
self.momtm = MOMTM(self.returndata).shift(21).dropna(how = 'all')
self.momtm = pd.DataFrame(self.momtm.stack(),columns=['momentum'])
# residual volatility
print('rise residual volatility')
self.hist_volatility = self.returndata.ewm(halflife = 42,ignore_na = True,adjust = False).std().dropna(how = 'all')
CMRAlist = list(self.price.rolling(252))[252:]
self.CMRA = pd.concat(list(map(lambda x: CMRA(x,12),CMRAlist)))
self.Hsigma = self.olsparams['residual'].rolling(252,min_periods = 1).std()
self.residual_volatility = pd.DataFrame((self.hist_volatility*0.74+self.CMRA*0.16+self.Hsigma*0.1).stack()).dropna()
self.residual_volatility.columns = ['residual_volatility']
# non-linear size
print('rise non-linear size')
self.nlsize = (self.size**3).dropna()
self.nlsize.columns = ['nlsize']
# Bp
print('rise Bp')
self.Bp = self.filedict['Bp'].dropna()
# liquidity
print('rise Liquidity')
self.tvrdf = self.filedict['turnover']
self.liq_1m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(22,min_periods =1).mean())
self.liq_3m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(74,min_periods =1).mean())
self.liq_12m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(252,min_periods =1).mean())
self.liq = (0.35*self.liq_1m + 0.35*self.liq_3m + 0.3*self.liq_12m).dropna()
print('rise Earning Yield')
self.earning_yield = pd.concat([self.filedict['Ep'],self.filedict['Sp']],axis = 1)
self.earning_yield['earning_yield'] = self.earning_yield['ep_ratio_ttm']*0.66+self.earning_yield['sp_ratio_ttm']*0.34
self.earning_yield = self.earning_yield[['earning_yield']].dropna()
# growth
print('rise growth')
NP = self.filedict['NPGO'].unstack()
NP = (NP-NP.shift(504))/NP.shift(504).abs().replace(0,np.nan)
NP = NP.stack()
RVN = self.filedict['RGO'].unstack()
RVN = (RVN - RVN.shift(504))/RVN.shift(504).abs().replace(0,np.nan)
RVN = RVN.stack()
self.growth = pd.DataFrame(NP['net_profit_parent_company_ttm_0']*0.34+RVN['revenue_ttm_0']*0.66)
self.growth.columns = ['growth']
self.growth.dropna(inplace=True)
# leverage
print('rise leverage')
self.leverage = self.filedict['MLEV']['du_equity_multiplier_ttm']*0.38+self.filedict['DTOA']['debt_to_asset_ratio_ttm']*0.35+self.filedict['BLEV']['book_leverage_ttm']*0.27
self.leverage = pd.DataFrame(self.leverage)
self.leverage.columns = ['leverage']
self.leverage.dropna(inplace=True)
# concat
self.barrafactor = pd.concat([
self.size,
self.beta,
self.momtm,
self.residual_volatility,
self.nlsize,
self.Bp,
self.liq,
self.earning_yield,
self.growth,
self.leverage],axis = 1).sort_index(level = 0)
'''orthogonalization'''
# the raw, un-orthogonalized factors are kept as instance attributes and can be used directly
print('Orthogonalizing....')
y = ['residual_volatility','nlsize','turnover']
xlist = ['circulation_A','beta']
# without dropna the regression raises an error
self.barrafactor[y[0]] = self.barrafactor[[y[0]]+xlist].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[0],xlist))
self.barrafactor[y[1]] = self.barrafactor[[y[1]]+xlist[:1]].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[1],xlist[:1]))
self.barrafactor[y[2]] = self.barrafactor[[y[2]]+xlist[:1]].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[2],xlist[:1]))
# standardization
def return_barra_factor(self,rank_normalize:bool):
mft = multi_factor_test()
if rank_normalize:
return mft.mat_ranknormlize(self.barrafactor)
else:
return mft.mat_normlize(self.barrafactor)
def barra_compose(self,factordata):
# the factor values are ranks
decompose = pd.concat([self.barrafactor,factordata],axis = 1).dropna().rename_axis(['date','symbol'])
def orthog(barrafactor,y,xlist):
df = barrafactor.copy()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
params = regre.params[~(regre.params.index == 'Intercept')]
intercept = regre.params[(regre.params.index == 'Intercept')]
residual = df[y] - (df[list(params.index)]*params).sum(axis = 1) - intercept.values
return residual,params
# this vectorized approach is wrong when only a single day is computed
# residual_ols =decompose.groupby(level = 0).apply(lambda x: orthog(x,list(decompose)[-1],list(decompose)[:-1])[0]).droplevel(0)
# params_ols =decompose.groupby(level = 0).apply(lambda x: orthog(x,list(decompose)[-1],list(decompose)[:-1])[1])
# return residual_ols,params_ols
decomposebyday = list(decompose.groupby(level = 'date'))
residual_olslist = []
params_olslist = []
for df in decomposebyday:
x = df[1]
residual_ols,params_ols = orthog(x,list(decompose)[-1],list(decompose)[:-1])
residual_olslist.append(residual_ols)
params_olslist.append(pd.DataFrame(params_ols,columns = [df[0]]).T)
return pd.concat(residual_olslist),pd.concat(params_olslist)
def barra_style_pool(self,style,cutnum):
bystyle = self.barrafactor[[style]].copy()
bystyle[style+'_group'] = bystyle[style].dropna().groupby(level = 0,group_keys=False).apply(lambda x: pd.cut(x,cutnum,labels=list(range(1,cutnum+1))))
return bystyle
def factor_performance_bystyle(self,factordata,factorname,style,cutnum):
# even if a factor has no style tilt, its performance can still differ across style buckets
bystyle = pd.concat([factordata,self.barrafactor[[style]]],axis = 1)
bystyle[style+'_group'] = bystyle[style].dropna().groupby(level = 0,group_keys=False).apply(lambda x: pd.cut(x,cutnum,labels=list(range(1,cutnum+1))))
ic_daily = bystyle.groupby(style+'_group',group_keys=False).apply(lambda x: x[[factorname,'nday_return']].groupby(level = 0).apply(lambda x: x.corr('spearman').iloc[0,1])).T
return ic_daily
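# --- Hedged sketch of the per-date style neutralization used in barra_compose()
# (illustrative only): regress the factor on the style exposures cross-sectionally
# and keep the OLS residual as the neutralized factor. The synthetic one-date data
# and column names below are assumptions; the formula API matches the code above.
def _demo_style_neutralize():
    import numpy as np
    import pandas as pd
    import statsmodels.api as sm
    rng = np.random.default_rng(5)
    n = 200
    styles = pd.DataFrame({'size_exp': rng.normal(size=n), 'beta': rng.normal(size=n)})
    df = styles.assign(myfactor=0.5 * styles['size_exp'] - 0.2 * styles['beta'] + rng.normal(size=n))
    fit = sm.formula.ols('myfactor ~ size_exp + beta', data=df).fit()
    residual = fit.resid                   # the style-neutral part of the factor
    params = fit.params.drop('Intercept')  # style loadings
    return residual, params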
class AutoMatic(object):
sst = single_signal_test()
mft = multi_factor_test()
def __init__(self,Bft,base_index,Price,quantiles,days,qcut,demean,weighted_method) -> None:
'''base_index: benchmark price series whose time index is Timestamps'''
self.Bft = Bft
self.Price = Price.copy()
self.base_index = base_index
self.quantiles = quantiles
self.days = days
self.qcut = qcut
self.demean = demean
self.weighted_method = weighted_method
pass
def AutoMatic_DirCheck(self,path):
if not os.path.exists(path):
os.makedirs(path)
def AutoMatic_Direc_Adjust(self,factors,dir_):
neu_factors = factors.copy()
direction_dict = {}
strict_eff = []
unstrict_eff = []
self.AutoMatic_DirCheck(dir_+'direction/')
self.AutoMatic_DirCheck(dir_+'direction/redirection/')
for fc in list(neu_factors):
res_df = self.sst.factor_prepare(neu_factors,fc,self.quantiles,self.qcut)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,tmp,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,fc,res_df,self.Price,self.days,dir_+'/direction/',self.demean)
if qreturn[list(qreturn)[0]].sum()<= qreturn[self.quantiles].sum():
print(fc+' is a positively oriented factor')
direction_dict[fc] = 1
if qreturn[list(qreturn)[0]].sum() > qreturn[self.quantiles].sum():
print(fc+' is a negatively oriented factor')
neu_factors = neu_factors.copy()
neu_factors[fc]=self.mft.mat_normlize(-1*neu_factors[fc])
direction_dict[fc] = -1
res_df = self.sst.factor_prepare(neu_factors,fc,self.quantiles,self.qcut)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,tmp,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,fc,res_df,self.Price,self.days,dir_+'direction/redirection/',self.demean)
# effectiveness screening
strict_eff,unstrict_eff = self.sst.eff_classification(fc,indicator,self.sst.efficient_judge,strict_eff,unstrict_eff)
return direction_dict,strict_eff,unstrict_eff
def AutoMatic_Factor_Merge_Ret(self,neu_factors,base_factors,mergename,dir_):
base_f = pd.DataFrame(neu_factors[base_factors].sum(axis = 1),columns = [mergename])
res_df = self.sst.factor_prepare(base_f,mergename,self.quantiles,self.qcut)
self.AutoMatic_DirCheck(dir_)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,clean_factor,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,mergename,res_df,self.Price,self.days,dir_,self.demean)
return qreturn,clean_factor
def AutoMatic_Compare_Indicator(self,qreturn,reverse):
if reverse:
maxq = min(list(qreturn))
base_perf_sr = -1*qreturn[maxq].mean()/qreturn[maxq].std()
return base_perf_sr
maxq = max(list(qreturn))
base_perf_sr = qreturn[maxq].mean()/qreturn[maxq].std()
return base_perf_sr
def threads_pool_run(self,params_batch):
InSampleFactors,i,dir_ = params_batch[0] , params_batch[1] , params_batch[2]
import matplotlib
matplotlib.use('agg')
savedir = dir_+'{}/'.format(i)
direction_dict,strict_eff,unstrict_eff =self.AutoMatic_Direc_Adjust(InSampleFactors,savedir) # 方向调整
return (direction_dict,strict_eff,unstrict_eff)
def AutoMatic_Stochastic_Optimizer(self,test_factor,threads_num,dir_):
dateset = list(set(test_factor.index.get_level_values('date')))
import multiprocessing
from multiprocessing import Pool
InSplList = []
for i in range(threads_num):
randomdate = sorted(np.random.choice(dateset,int(len(dateset)/5),replace = False))
InSplList.append((test_factor.loc[randomdate],i,dir_))
pool = Pool(min(multiprocessing.cpu_count(),threads_num))
return pool.map(self.threads_pool_run,InSplList)
def AutoMatic_Perf_InPool(self,neu_factors,base_factors,reverse,save_dir):
qreturn,tmp =self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors,'basef',save_dir+'temp/')
base_perf_sr= self.AutoMatic_Compare_Indicator(qreturn,reverse)
others = list(filter(lambda x: x not in base_factors,list(neu_factors)))
for sf in others:# add candidates one at a time and compare performance
print(base_factors)
qreturn,tmp =self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors+[sf],sf+'_basef',save_dir+'temp/')
perf_sr = self.AutoMatic_Compare_Indicator(qreturn,reverse)
print('beats previous best: {}; excess Sharpe this round: {}; best excess Sharpe so far: {}'.format(perf_sr > base_perf_sr,perf_sr,base_perf_sr))
if perf_sr > base_perf_sr:
base_factors.append(sf)
base_perf_sr = perf_sr
qreturn,clean_factor = self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors,'basef',save_dir+'final/')
return qreturn,clean_factor
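# --- Hedged sketch of the greedy loop inside AutoMatic_Perf_InPool (illustrative only):
# keep a base factor set, try the remaining candidates one at a time, and keep a
# candidate only when the merged signal's score improves. The score used here is a
# toy mean/std proxy, not the full factor_ret_test_sheet pipeline.
def _demo_greedy_merge():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(6)
    panel = pd.DataFrame(rng.normal(size=(250, 4)), columns=['f1', 'f2', 'f3', 'f4'])
    def score(sig):
        return sig.mean() / sig.std()
    base = ['f1']
    best = score(panel[base].sum(axis=1))
    for name in ['f2', 'f3', 'f4']:
        cand = score(panel[base + [name]].sum(axis=1))
        if cand > best:
            base, best = base + [name], cand
    return base, best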
class plot_tools(object):
def __init__(self) -> None:
import matplotlib
self.plotstatus = matplotlib.get_backend()
pass
def trio_plt(self,qmean,qcum,quantiles): # plot the return charts
import matplotlib.pyplot as plt
qmean[list(range(1,quantiles+1))].plot(kind= 'bar',title = 'mean')
plt.show()
qcum[list(range(1,quantiles+1))].plot(title = 'cumreturn')
plt.legend(loc = 'upper center',bbox_to_anchor=(1.1, 1.02))
plt.show()
(qcum[quantiles]-qcum[1]).plot(title = 'long-short') # top quantile minus bottom quantile
plt.show()
def fbplot(self,frontplot,bgplot,c,fname,bname):
# frontplot,bgplot:
# pd.Series
# multiindex: timestamp,code
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
tickspace = len(frontplot)//12
fig = plt.figure()
a1=fig.add_axes([0,0,1,1])
a1.bar(frontplot.index,bgplot.loc[frontplot.index],color = c)
a1.tick_params(axis='x', labelrotation= 30)
a1.xaxis.set_major_locator(ticker.MultipleLocator(tickspace))
a2 = a1.twinx()
a2.plot(frontplot.index,frontplot,color = 'red')
a2.tick_params(axis='x', labelrotation= 30)
a2.xaxis.set_major_locator(ticker.MultipleLocator(tickspace))
fig.legend(frameon = False,labels = [bname+'(left)',fname+'(right)'],loc = 'upper center')
plt.show()
def factor_plt(self,qreturn,to,ictable,fc,ndays,savedir=''):
from alphalens import utils
from pandas.plotting import table
numtable = pd.concat([qreturn.mean(),qreturn.sum(),qreturn.mean()/qreturn.std()],axis = 1).rename(columns= {0:'avg',1:'sum',2:'risk-adj'}).T
top_quantile = max(list(qreturn))
totalSeed = qreturn.index
xticks = list(range(0, len(totalSeed), 60))
xlabels = [str(totalSeed[x]) for x in xticks]
import matplotlib.pyplot as plt
plt.figure(dpi=300, figsize=(24, 12))
ax = plt.subplot(321,frame_on=False,title = fc+'_retsheet_bygroup')
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, numtable.round(5), loc='center') # swap in whichever dataframe needs to be rendered
ax = plt.subplot(365,frame_on=False,title = str(ndays)+'days_information')
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, ictable.round(5), loc='center') # swap in whichever dataframe needs to be rendered
plt.subplot(325,title = fc+'_cumret_bygroup')
plt.plot(qreturn.index,qreturn.cumsum(),label = list(qreturn))
plt.legend()
plt.xticks(rotation=90)
plt.xticks(ticks=xticks, labels=xlabels)
plt.subplot(324,title = fc+'_turnover_bygroup')
plt.bar(to.index,to,color="blue")
plt.subplot(323,title = fc+'_avgret_bygroup')
plt.bar(qreturn.mean().index,qreturn.mean(),color="y")
plt.subplot(326,title = fc+'_lsret_bygroup')
plt.plot(qreturn.index,(qreturn[top_quantile]-qreturn[1]).cumsum(),color="g")
plt.xticks(rotation=90)
plt.xticks(ticks=xticks, labels=xlabels)
try:
os.remove(savedir+fc+'.jpg')
print(fc+'.jpg'+' old file removed')
except:
print(fc+'.jpg'+' is a new file')
plt.savefig(savedir+fc+'.jpg')
if self.plotstatus != 'agg':
plt.show()
plt.close()
# heatmap display
def ShowHeatMap(self,DataFrame,savedir='',triangle = True):
import matplotlib.pyplot as plt
import seaborn as sns
f, ax = plt.subplots(figsize=(35, 15))
ax.set_title('Wine GRA')
# show only half the matrix; comment out the mask below to show the full matrix
if triangle:
mask = np.zeros_like(DataFrame)
mask[np.triu_indices_from(mask)] = True # np.triu_indices selects the upper triangle
with sns.axes_style("white"):
sns.heatmap(DataFrame,
cmap="YlGnBu",
annot=True,
mask=mask,
)
else :
with sns.axes_style("white"):
sns.heatmap(DataFrame,
cmap="YlGnBu",
annot=True,
)
plt.savefig(savedir)
if self.plotstatus != 'agg':
plt.show()
def combine_imgs_pdf(self,folder_path, pdf_file_path,idstname):
import os
from PIL import Image
"""
合成文件夹下的所有图片为pdf
Args:
folder_path (str): 源文件夹
pdf_file_path (str): 输出路径
"""
files = os.listdir(folder_path)
png_files = []
sources = []
for file in files:
if 'png' in file or 'jpg' in file:
png_files.append(folder_path + file)
png_files.sort()
for file in png_files:
png_file = Image.open(file)
png_file = png_file.convert("RGB")
sources.append(png_file)
sources[0].save(pdf_file_path+'{}.pdf'.format(idstname), "pdf", save_all=True, append_images=sources[1:],quality = 95)
class mate_alphalens(object):
def __init__(self) -> None:
pass
def index_mate(self,factordata,price):
fcdf = factordata.reset_index()
fcdf['date'] = pd.to_datetime(fcdf['date'])
fcdf = fcdf.rename(columns = {'symbol':'asset'}).set_index(['date','asset'])
ptemp = price.copy()
ptemp.index = pd.to_datetime(ptemp.index)
return fcdf,ptemp
def trans_ex_return(self,clean_factor,index_price,ret_col):
from alphalens import utils
index_price['factor'] = 1
base_ret = utils.compute_forward_returns(index_price[['factor']],index_price['close'].unstack())
base_ret = base_ret.droplevel('asset').reindex(clean_factor.index.get_level_values(0))
base_ret['asset'] = clean_factor.index.get_level_values('asset')
base_ret = base_ret.set_index(['asset'],append=True)
df = clean_factor.copy()
df[ret_col]= df[ret_col]-base_ret[ret_col]
return df
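# --- Hedged, pandas-only sketch of what trans_ex_return does (illustrative only):
# subtract the benchmark's forward return from every asset's forward return on the
# same date. The real method builds the forward returns with
# alphalens.utils.compute_forward_returns; the toy frames below stand in for them.
def _demo_excess_return():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(7)
    dates = pd.date_range('2023-01-02', periods=4, freq='B', name='date')
    assets = pd.Index(['AAA', 'BBB'], name='asset')
    fwd = pd.DataFrame(rng.normal(0.0, 0.02, (4, 2)),
                       index=dates, columns=assets).stack().to_frame('1D')
    index_fwd = pd.Series(rng.normal(0.0, 0.01, 4), index=dates, name='1D')
    excess = fwd['1D'] - index_fwd.reindex(fwd.index.get_level_values('date')).values
    return excess.to_frame('1D_excess')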
class alert(object):
def __init__(self,**file):
if file:
self.filename = file
else:
import sys
self.filename = sys.argv[0]
pass
def finish_alert(self):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# 1. connect to the mail server
con = smtplib.SMTP_SSL('smtp.qq.com', 465)
# 2. log in to the mailbox
con.login('[email protected]', 'jwtjvrktevlobiag')
# 3. prepare the message
# create the mail object
msg = MIMEMultipart()
# set the subject
subject = Header('{} finished running'.format(self.filename), 'utf-8').encode()
msg['Subject'] = subject
# set the sender
msg['From'] = '[email protected]'
# set the recipient
msg['To'] = '[email protected]'
# add the text body
text = MIMEText('{} finished running'.format(self.filename), 'plain', 'utf-8')
msg.attach(text)
# 4. send the mail
con.sendmail('[email protected]', '[email protected]', msg.as_string())
con.quit()
def breakdown_alert(self):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# 1. connect to the mail server
con = smtplib.SMTP_SSL('smtp.qq.com', 465)
# 2. log in to the mailbox
con.login('[email protected]', 'jwtjvrktevlobiag')
# 3. prepare the message
# create the mail object
msg = MIMEMultipart()
# set the subject
subject = Header('{} failed to run'.format(self.filename), 'utf-8').encode()
msg['Subject'] = subject
# set the sender
msg['From'] = '[email protected]'
# set the recipient
msg['To'] = '[email protected]'
# add the text body
text = MIMEText('{} failed to run'.format(self.filename), 'plain', 'utf-8')
msg.attach(text)
# 4. send the mail
con.sendmail('[email protected]', '[email protected]', msg.as_string())
con.quit()
# sst:
# def noise_judge(self,qreturn,fc):
# from scipy import stats
# from statsmodels.stats.diagnostic import acorr_ljungbox
# # 因子判别
# lsret,groupmean,groupmean_diff,top_sharp = self.judge_material(qreturn,fc)
# '''
# 检验:
# 对两个都没通过的可能是噪声的因子做自相关性检验,因为0假设是有相关性,所以哪怕只有一点自相关性(123123)都可能不会被拒绝,所以被拒绝的基本上可认定为噪声
# '''
# t,p_lsret =stats.ttest_1samp(lsret,0,alternative='greater')
# t,p_groupmean = stats.ttest_1samp(groupmean_diff,0,alternative='greater')
# if p_groupmean>0.05 and p_lsret>0.05:
# print(fc+'可能是噪声;分组平均收益一阶差分p值{},多空收益p值{}'.format(p_groupmean,p_lsret))
# # ls_ljung = acorr_ljungbox(lsret.cumsum(), lags=[1,5,10,20])
# gmdf_ljung = acorr_ljungbox(groupmean, lags=[1,5])
# if gmdf_ljung['lb_pvalue'].min()>=0.05:
# print(fc+'是噪声;分组平均收益自相关检验最小p值{}'.format(gmdf_ljung['lb_pvalue'].min()))
# return True
# else:
# print('无法判定'+fc+'是噪声;分组平均收益自相关检验最小p值{}'.format(gmdf_ljung['lb_pvalue'].min()))
# return False
# def short_judge(self,qreturn,fc):
# from scipy import stats
# # 因子判别
# lsret,groupmean,groupmean_diff = self.judge_material(qreturn,fc)
# '''
# 检验:
# 对两个都通过的是有用的因子
# '''
# maxquantile = max(list(lsret))
# top5 = groupmean_diff.iloc[-5:]
# bottom5 = groupmean_diff.iloc[:5]
# t,p_top5 = stats.ttest_1samp(top5,0,alternative='greater')
# t,p_bottom5 = stats.ttest_1samp(bottom5,0,alternative='greater')
# if p_top5>0.5 and p_bottom5<0.1 and (abs(groupmean.iloc[-1])<abs(groupmean.iloc[0])):
# print(fc+'是空头因子;top5组平均收益一阶差分p值{},bottom5组平均收益一阶差分p值{}'.format(p_top5,p_bottom5))
# return True
# return False
# mft:
# def multif_denoisies(self,noise_factors_list,allfactors,threshold):
# '''
# 输入:
# 因子矩阵,噪声因子
# 输出:
# 去噪后的因子
# '''
# if len(noise_factors_list)==0:
# print('无可用于去噪的噪声')
# return allfactors
# other_factors_df = allfactors[list(filter(lambda x: x not in noise_factors_list,list(allfactors)))]
# noise_factors_df = self.ts_mat_orthog(allfactors[noise_factors_list])
# factordf = pd.concat([other_factors_df,noise_factors_df],axis = 1)
# # 去噪
# other_factors = list(other_factors_df)
# corrdf = self.multif_corr_ana(factordf,list(factordf))[0]
# print('相关性详情:')
# print(corrdf)
# corrdf = corrdf.loc[other_factors,noise_factors_list].abs().max(axis = 1)
# print('要被去噪的因子:')
# corr_with_noise = list(corrdf[corrdf>=threshold].index)
# print(corr_with_noise)
# for fc in corr_with_noise:
# factordf[fc] = self.orthog(factordf, fc, noise_factors_list)[0]
# return factordf[other_factors]
# def multif_cal_weight(self,factordf,factorlist,return_col,weight_type):
# # factordf: pd.DataFrame
# # multiindex: timestamp,code
# # columns: factorname1, factorname2...,returndata
# # factorlist: strlist
# # return_col: column name, str
# df = factordf.copy()
# ret_k = self.fators_abnormal_ret(df,return_col,factorlist)
# ic = df.groupby(level = 'date').apply(lambda x: x.corr(method= 'spearman')[return_col])
# del ic['ret']
# weight = ret_k*ic
# direc = ic.mean().apply(lambda x: 1 if x>0 else -1)
# if weight_type == 1:
# return weight.mean()/weight.std()*direc
# elif weight_type == 2:
# return weight.mean()*direc
# else:
# return direc
# # if weight_type == '风险平价加权':
# # cov = weight[factorlist].cov()
# # from scipy.optimize import minimize
# # def objective(x):
# # w_cov = np.dot(cov,x.T)
# # for n in range(len(x)):
# # w_cov[n] *= x[n]
# # mat = np.array([w_cov]*len(x))
# # scale = 1/sum(abs(mat))
# # return np.sum(abs(scale*(mat-mat.T)))
# # initial_w=np.array([0.2]*len(factorlist))
# # cons = []
# # cons.append({'type':'eq','fun':lambda x: sum(x)-1})
# # for i in range(len(initial_w)):
# # cons.append({'type':'ineq','fun':lambda x: x[i]})
# # #结果
# # res=minimize(objective,initial_w,method='SLSQP',constraints=cons)
# # params = pd.Series(res.x)
# # params.index = cov.index
# # return params
# def weighted_factor(self,factordf,weight):
# # factordf: pd.DataFrame
# # multiindex: timestamp,code
# # columns: factorname1, factorname2...
# # weight:pd.Series
# wf = (weight*factordf).sum(axis = 1)
# return pd.DataFrame(wf,columns = ['weighted_factor'])
|
Alpha-Planet
|
/Alpha_Planet-0.0.2-py3-none-any.whl/Alpha_Rabbit/Alpha_Rabbit.py
|
Alpha_Rabbit.py
|
from Alpha_Rabbit.Factor_Def_and_Get_Method import *
_method = Factor_get_method()
def Factor_Calculator(pricebyday,minbar,conn,todaydate,notst,factors_to_cal):
######################################## factors computed from daily bars ####################################
uploadfactordict = {}
Close = pricebyday['close'].unstack().sort_index()
Open = pricebyday['open'].unstack().sort_index()
High = pricebyday['high'].unstack().sort_index()
Low = pricebyday['low'].unstack().sort_index()
volume = pricebyday[['volume']].pivot_table(index = 'date',columns = 'symbol',values = 'volume').sort_index()
total_turnover = pricebyday[['total_turnover']].pivot_table(index = 'date',columns = 'symbol',values = 'total_turnover').sort_index()
tovr_r = pricebyday['turnover_ratio'].unstack().sort_index()
Close_ret = Close.pct_change()
tempClose = Close.iloc[-30:]
tempOpen = Open.iloc[-30:]
tempHigh = High.iloc[-30:]
tempLow = Low.iloc[-30:]
anaual_close = Close.iloc[-272:]
anaual_High = High.iloc[-272:]
if 'mmt_intraday_M' in factors_to_cal or factors_to_cal == 'all':
# one-month intraday momentum
uploadfactordict['mmt_intraday_M'] = mmt_intraday_M(tempClose,tempOpen)
if 'mmt_range_M' in factors_to_cal or factors_to_cal == 'all':
# one-month range-adjusted momentum
uploadfactordict['mmt_range_M'] = mmt_range_M(tempHigh,tempLow,tempClose)
if 'mmt_overnight_M' in factors_to_cal or factors_to_cal == 'all':
# overnight momentum
uploadfactordict['mmt_overnight_M'] = mmt_overnight_M(tempOpen,tempClose)
if 'mmt_route_M' in factors_to_cal or factors_to_cal == 'all':
# path-adjusted momentum
uploadfactordict['mmt_route_M'] = mmt_route_M(tempClose)
if 'mmt_discrete_M' in factors_to_cal or factors_to_cal == 'all':
# information-discreteness momentum
uploadfactordict['mmt_discrete_M'] = mmt_discrete_M(tempClose)
if 'mmt_sec_rank_M' in factors_to_cal or factors_to_cal == 'all':
# cross-sectional rank momentum
uploadfactordict['mmt_sec_rank_M'] = mmt_sec_rank_M(tempClose)
if 'mmt_time_rank_M' in factors_to_cal or factors_to_cal == 'all':
# time-series rank score
uploadfactordict['mmt_time_rank_M'] = mmt_time_rank_M(anaual_close)
if 'mmt_highest_days_A' in factors_to_cal or factors_to_cal == 'all':
# number of days since the annual high
uploadfactordict['mmt_highest_days_A'] = mmt_highest_days_A(anaual_High)
if 'volumestable' in factors_to_cal or factors_to_cal == 'all':
# volume stability
uploadfactordict['volumestable'] = volumestable(volume)
if '_con' in factors_to_cal or factors_to_cal == 'all':
# return-consistency factor
uploadfactordict['_con'] = re_con(tempClose)
if 'bofu_money' in factors_to_cal or factors_to_cal == 'all':
# intraday range / turnover value
uploadfactordict['bofu_money'] = bofu_money(tempHigh,tempLow,tempOpen,total_turnover)
if 'vol_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month return volatility
uploadfactordict['vol_std_1M'] = vol_std(Close_ret,'1M',30)
if 'vol_up_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month upside return volatility
uploadfactordict['vol_up_std_1M'] = vol_up_std(Close_ret,'1M',30)
if 'vol_down_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month downside return volatility
uploadfactordict['vol_down_std_1M'] = vol_down_std(Close_ret,'1M',30)
if 'vol_updown_ratio_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month ratio of upside to downside volatility
uploadfactordict['vol_updown_ratio_1M'] = vol_updown_ratio(Close_ret,'1M',30)
if 'vol_highlow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month average high-low range
uploadfactordict['vol_highlow_avg_1M'] = vol_highlow_avg(High,Low,'1M',30)
if 'vol_highlow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month high-low range volatility
uploadfactordict['vol_highlow_std_1M'] = vol_highlow_std(High,Low,'1M',30)
if 'vol_highlow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month high-low range stability
uploadfactordict['vol_highlow_stable_1M'] = vol_highlow_stable(High,Low,'1M',30)
if 'vol_upshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month average upper shadow
uploadfactordict['vol_upshadow_avg_1M'] = vol_upshadow_avg(High,Open,Close,'1M',30)
if 'vol_upshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month upper-shadow volatility
uploadfactordict['vol_upshadow_std_1M'] = vol_upshadow_std(High,Open,Close,'1M',30)
if 'vol_upshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month upper-shadow stability
uploadfactordict['vol_upshadow_stable_1M'] = vol_upshadow_stable(High,Open,Close,'1M',30)
if 'vol_downshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month average lower shadow
uploadfactordict['vol_downshadow_avg_1M'] = vol_downshadow_avg(Low,Open,Close,'1M',30)
if 'vol_downshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month lower-shadow volatility
uploadfactordict['vol_downshadow_std_1M'] = vol_downshadow_std(Low,Open,Close,'1M',30)
if 'vol_downshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month lower-shadow stability
uploadfactordict['vol_downshadow_stable_1M'] = vol_downshadow_stable(Low,Open,Close,'1M',30)
if 'vol_w_upshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month average Williams upper shadow
uploadfactordict['vol_w_upshadow_avg_1M'] = vol_w_upshadow_avg(High,Close,'1M',30)
if 'vol_w_upshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month Williams upper-shadow volatility
uploadfactordict['vol_w_upshadow_std_1M'] = vol_w_upshadow_std(High,Close,'1M',30)
if 'vol_w_upshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month Williams upper-shadow stability
uploadfactordict['vol_w_upshadow_stable_1M'] = vol_w_upshadow_stable(High,Close,'1M',30)
if 'vol_w_downshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month mean of the Williams lower shadow
uploadfactordict['vol_w_downshadow_avg_1M'] = vol_w_downshadow_avg(Low,Close,'1M',30)
if 'vol_w_downshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month volatility of the Williams lower shadow
uploadfactordict['vol_w_downshadow_std_1M'] = vol_w_downshadow_std(Low,Close,'1M',30)
if 'vol_w_downshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month stability of the Williams lower shadow
uploadfactordict['vol_w_downshadow_stable_1M'] = vol_w_downshadow_stable(Low,Close,'1M',30)
if 'liq_turn_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month mean turnover
uploadfactordict['liq_turn_avg_1M'] = liq_turn_avg(tovr_r,'1M',30)
if 'liq_turn_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month turnover standard deviation
uploadfactordict['liq_turn_std_1M'] = liq_turn_std(tovr_r,'1M',30)
if 'liq_vstd_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month turnover-to-volatility ratio
uploadfactordict['liq_vstd_1M'] = liq_vstd(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month mean of the Amihud illiquidity factor
uploadfactordict['liq_amihud_avg_1M'] = liq_amihud_avg(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month volatility of the Amihud illiquidity factor
uploadfactordict['liq_amihud_std_1M'] = liq_amihud_std(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month stability of the Amihud illiquidity factor
uploadfactordict['liq_amihud_stable_1M'] = liq_amihud_stable(tovr_r,Close_ret,'1M',30)
if 'liq_shortcut_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month mean of the shortest-path illiquidity factor
uploadfactordict['liq_shortcut_avg_1M'] = liq_shortcut_avg(tovr_r,High,Low,Open,Close,'1M',30)
if 'liq_shortcut_std_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month volatility of the shortest-path illiquidity factor
uploadfactordict['liq_shortcut_std_1M'] = liq_shortcut_std(tovr_r,High,Low,Open,Close,'1M',30)
if 'liq_shortcut_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# one-month stability of the shortest-path illiquidity factor
uploadfactordict['liq_shortcut_stable_1M'] = liq_shortcut_stable(tovr_r,High,Low,Open,Close,'1M',30)
if 'PLUS' in factors_to_cal or factors_to_cal == 'all':
# difference between upper and lower shadows
uploadfactordict['PLUS'] = PLUS(tempClose,tempHigh,tempLow)
if 'liq_std_w_plus_1M' in factors_to_cal or factors_to_cal == 'all':
# upper/lower shadow difference times turnover volatility
uploadfactordict['liq_std_w_plus_1M'] = liq_std_w_plus(tempClose,tempHigh,tempLow, tovr_r,'1M',30)
if 'HL_Sprd' in factors_to_cal or factors_to_cal == 'all':
# ideal amplitude factor
uploadfactordict['HL_Sprd'] = HL_Sprd(Close,High,Low,20)
if 'tvr_std_1M' in factors_to_cal or factors_to_cal == 'all':
# turnover stability
uploadfactordict['tvr_std_1M'] = tvr_std(tovr_r,'1M',20)
if 'corr_price_turn_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between price and turnover
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-20:]
uploadfactordict['corr_price_turn_1M'] = corr_price_turn(timerange,pricebyday,'1M')
if 'corr_ret_turn_post_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between returns and turnover
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-21:] # one extra day is needed for the pct_change calculation
uploadfactordict['corr_ret_turn_post_1M'] = corr_ret_turn_post(timerange,pricebyday,'1M')
if 'corr_ret_turnd_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between returns and turnover changes
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-21:]
uploadfactordict['corr_ret_turnd_1M'] = corr_ret_turnd(timerange,pricebyday,'1M')
######################################## factors computed from intraday data ####################################
# factors based on single-trade turnover amount
sing_trade_amt = pd.DataFrame(minbar['total_turnover']/minbar['num_trades'],columns= ['single_trade_amt'])
sing_trade_amt = sing_trade_amt[sing_trade_amt['single_trade_amt']>0]
sing_trade_amt['trading_date'] = todaydate
sta_del_extrm = sing_trade_amt.groupby(level = 0).apply(lambda x: x.sort_values('single_trade_amt').iloc[:-10]).droplevel(0)# drop the largest values (top 10 rows)
sta_50pct = sing_trade_amt.groupby(level = 0).\
apply(lambda x: x[x['single_trade_amt']<x['single_trade_amt'].quantile(0.5)]).droplevel(0)# bottom 50 percent by trade amount
if 'mts' in factors_to_cal or factors_to_cal == 'all':
# main-force (large-trade) trading intensity
uploadfactordict['mts'] = mts(sta_del_extrm,minbar,todaydate)
if 'mte' in factors_to_cal or factors_to_cal == 'all':
# main-force (large-trade) trading sentiment
uploadfactordict['mte'] = mte(sta_del_extrm,minbar,todaydate)
if 'qua' in factors_to_cal or factors_to_cal == 'all':
# quantile factor qua
uploadfactordict['qua'] = qua(sta_del_extrm,todaydate)
if 'qua20m' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('qua',notst.index[-20],conn)
q = qua(sta_del_extrm,todaydate)
qua20m = pd.concat([prv_factor,q]).unstack().rolling(20,min_periods=1).mean().iloc[-1:].stack().rename(columns = {'qua':'qua20m'})
uploadfactordict['qua20m'] = qua20m
if 'skew' in factors_to_cal or factors_to_cal == 'all':
# skewness factor skew
uploadfactordict['skew'] = skew(sta_50pct,todaydate)
if 'skew20m' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('skew',notst.index[-20],conn)
sk = skew(sta_50pct,todaydate)
skew20m = pd.concat([prv_factor,sk]).unstack().rolling(20,min_periods=1).mean().iloc[-1:].stack().rename(columns = {'skew':'skew20m'})
uploadfactordict['skew20m'] = skew20m
if 's_reverse' in factors_to_cal or factors_to_cal == 'all':
# strong reversal factor
uploadfactordict['s_reverse'] = s_reverse(sing_trade_amt,minbar,todaydate)
if 's_reverse_10_sum' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('s_reverse',notst.index[-10],conn)
sr = s_reverse(sing_trade_amt,minbar,todaydate)
s_reverse_10_sum = pd.concat([prv_factor,sr]).unstack().rolling(10,min_periods=1).sum().iloc[-1:].stack().rename(columns = {'s_reverse':'s_reverse_10_sum'})
uploadfactordict['s_reverse_10_sum'] = s_reverse_10_sum
if 'daily_sta_90pct' in factors_to_cal or factors_to_cal == 'all':
# daily 90th percentile of single-trade amount (input to the ideal reversal factor)
uploadfactordict['daily_sta_90pct'] = daily_sta_90pct(sta_del_extrm)
if 'ideal_reverse' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('daily_sta_90pct',notst.index[-20],conn)
dsta90 = daily_sta_90pct(sing_trade_amt)
daily_sta_cal = pd.concat([prv_factor,dsta90])
uploadfactordict['ideal_reverse'] = ideal_reverse(daily_sta_cal,Close)
return uploadfactordict
|
Alpha-Planet
|
/Alpha_Planet-0.0.2-py3-none-any.whl/Alpha_Rabbit/Factor_Calculator.py
|
Factor_Calculator.py
|
import pandas as pd
import numpy as np
class Factor_get_method(object):
def __init__(self) -> None:
pass
def get_all_tables(self,con):
sql = "select name from sqlite_master where type ='table' order by name"
c = con.cursor()
result = c.execute(sql)
factorfilelist = [i[0] for i in result.fetchall()]
return factorfilelist
def sql_fetch(self,con,tablename):
cursorObj = con.cursor()
cursorObj.execute('PRAGMA table_info("{}")'.format(tablename))
return cursorObj.fetchall()
def sql_exec(self,sql,sqlcols,conn):
cur = conn.cursor()
result = cur.execute(sql)
result = pd.DataFrame(result,columns = sqlcols).set_index(['date','symbol'])
return result
def get_prev_days_factor_by_name(self,factorname:str,date:str,conn):
sql = "select * from {} where {}.date >= '{}'".format(factorname,factorname,date)
sqlcols = [txt[1] for txt in self.sql_fetch(conn,factorname)]
return self.sql_exec(sql,sqlcols,conn)
def get_selected_date_factor_by_name(self,factorname:str,date:str,conn):
sql = "select * from {} where {}.date = '{}'".format(factorname,factorname,date)
sqlcols = [txt[1] for txt in self.sql_fetch(conn,factorname)]
return self.sql_exec(sql,sqlcols,conn)
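# Usage sketch (illustrative only, not part of the original module; assumes a local SQLite
# database whose tables are named after factors and store 'date'/'symbol' columns, which is
# what the methods above expect):
#
#   import sqlite3
#   conn = sqlite3.connect('factors.db')                      # hypothetical database file
#   fgm = Factor_get_method()
#   fgm.get_all_tables(conn)                                  # e.g. ['qua', 'skew', ...]
#   prev = fgm.get_prev_days_factor_by_name('qua', '2023-01-01', conn)
#   # -> DataFrame indexed by ['date', 'symbol'] holding the stored factor values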
def mmt_intraday_M(tempClose,tempOpen):
# one-month intraday momentum
mmt_intraday_M = (tempClose/tempOpen - 1).iloc[-22:].cumsum()
mmt_intraday_M = pd.DataFrame(mmt_intraday_M.iloc[-1:].stack(),columns = ['mmt_intraday_M'])
return mmt_intraday_M
# one-month range-adjusted momentum
def mmt_range_M(tempHigh,tempLow,tempClose):
High_m = tempHigh.iloc[-22:].max()
Low_m = tempLow.iloc[-22:].min()
mmt_range_M = (High_m-Low_m)/tempClose.shift(22)
mmt_range_M = pd.DataFrame(mmt_range_M.iloc[-1:].stack(),columns = ['mmt_range_M'])
return mmt_range_M
def mmt_overnight_M(tempOpen,tempClose):
# overnight momentum
mmt_overnight = tempOpen/tempClose.shift(1) - 1
todaydate = mmt_overnight.index[-1]
mmt_overnight_M = pd.DataFrame(mmt_overnight.iloc[-20:].sum(),columns = ['mmt_overnight_M'])
mmt_overnight_M['date'] = todaydate
mmt_overnight_M = mmt_overnight_M.reset_index().set_index(['date','symbol'])
return mmt_overnight_M
def mmt_route_M(tempClose):
# path-adjusted momentum
mmt_route_M = (tempClose/tempClose.shift(20) - 1)/abs(tempClose/tempClose.shift(1)-1).rolling(20).sum()
mmt_route_M = pd.DataFrame(mmt_route_M.iloc[-1:].stack(),columns = ['mmt_route_M'])
return mmt_route_M
def mmt_discrete_M(tempClose):
# momentum based on information discreteness
daily_up = (tempClose/tempClose.shift(1)-1).applymap(lambda x: int(x>0) if not np.isnan(x) else np.nan)
daily_down = (tempClose/tempClose.shift(1)-1).applymap(lambda x: int(x<0) if not np.isnan(x) else np.nan)
mmt_discrete_M = daily_up.rolling(20).sum()/20-daily_down.rolling(20).sum()/20
mmt_discrete_M = pd.DataFrame(mmt_discrete_M.iloc[-1:].stack(),columns = ['mmt_discrete_M'])
return mmt_discrete_M
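# Worked note (added for clarity, hypothetical numbers): over the trailing 20 sessions the
# factor is the share of up days minus the share of down days. For example, 13 up days and
# 7 down days give 13/20 - 7/20 = 0.30; unchanged days contribute to neither count.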
def mmt_sec_rank_M(tempClose):
# cross-sectional rank momentum
mmt_sec_rank_M = (tempClose/tempClose.shift(1)-1).rank(axis = 1).rolling(20).mean()
mmt_sec_rank_M = pd.DataFrame(mmt_sec_rank_M.iloc[-1:].stack(),columns = ['mmt_sec_rank_M'])
return mmt_sec_rank_M
def mmt_time_rank_M(anaual_close):
# time-series rank-score momentum
# anaual_close = Close.iloc[-272:]
mmt_time_rank_M = (anaual_close/anaual_close.shift(1)-1).rolling(252,min_periods = 100).rank().rolling(20).mean()
mmt_time_rank_M = pd.DataFrame(mmt_time_rank_M.iloc[-1:].stack(),columns = ['mmt_time_rank_M'])
return mmt_time_rank_M
def mmt_highest_days_A(anaual_High):
# number of days since the highest price
todaydate = anaual_High.index[-1]
mmt_highest_days_A = 252- anaual_High.iloc[-252:].apply(lambda x: x.argmax())
mmt_highest_days_A = pd.DataFrame(mmt_highest_days_A,columns= ['mmt_highest_days_A'])
mmt_highest_days_A['date'] = todaydate
mmt_highest_days_A = mmt_highest_days_A.reset_index().set_index(['date','symbol'])
return mmt_highest_days_A
def volumestable(volume):
# volume stability
vol_m = volume.rolling(20).mean()
vol_std = volume.rolling(20).std()
volumestable = (vol_m/vol_std)
volumestable = pd.DataFrame(volumestable.iloc[-1:].stack(),columns = ['volumestable'])
return volumestable
def re_con(tempClose):
# return consistency factor
import numpy as np
d5_r = tempClose.pct_change(5).iloc[-1:]/5
d10_r = tempClose.pct_change(10).iloc[-1:]/10/np.sqrt(2)
d15_r = tempClose.pct_change(15).iloc[-1:]/15/np.sqrt(3)
con = pd.concat([d5_r.stack(),d10_r.stack(),d15_r.stack()],axis = 1).dropna()
con = con.mean(axis =1)/con.std(axis = 1)
con = con.unstack()
con_output = con.rank(axis = 1)
con_output = con_output.apply(lambda x: x-x.mean(),axis = 1).abs()
_con = pd.DataFrame(con_output.iloc[-1:].stack(),columns = ['_con'])
return _con
def bofu_money(tempHigh,tempLow,tempOpen,total_turnover):
# price range / turnover value
bofu_money = (tempHigh-tempLow)/tempOpen/total_turnover
bofu_money = pd.DataFrame(bofu_money.iloc[-1:].stack(),columns = ['bofu_money'])
return bofu_money
def vol_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = pd.DataFrame(ret.std(),columns = ['vol_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
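# Reading aid (comment only): the vol_* and liq_* helpers below share one output convention.
# Each takes wide date x symbol frames, keeps the last `perioddays` rows, reduces over time,
# stamps the most recent date, and returns a ['date','symbol']-indexed single-column
# DataFrame named '<factor>_<periodname>', e.g. 'vol_std_1M'.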
def vol_up_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = (ret*(ret>0).astype(int)).replace(0,np.nan)
df = pd.DataFrame(df.std(),columns = ['vol_up_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_down_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = (ret*(ret<0).astype(int)).replace(0,np.nan)
df = pd.DataFrame(df.std(),columns = ['vol_down_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_highlow_avg(high,low,periodname,perioddays):
ratio = (high/low).iloc[-perioddays:]
todaydate = ratio.index[-1]
df = pd.DataFrame(ratio.mean(),columns = ['vol_highlow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_highlow_std(high,low,periodname,perioddays):
ratio = (high/low).iloc[-perioddays:]
todaydate = ratio.index[-1]
df = pd.DataFrame(ratio.std(),columns = ['vol_highlow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_updown_ratio(df,periodname,perioddays):
upstd = vol_up_std(df,periodname,perioddays)
downstd = vol_down_std(df,periodname,perioddays)
updownratio = pd.DataFrame(upstd['vol_up_std_'+periodname]/downstd['vol_down_std_'+periodname],columns = ['vol_updown_ratio_'+periodname])
return updownratio
def vol_highlow_stable(high,low,periodname,perioddays):
hlavg = vol_highlow_avg(high,low,periodname,perioddays)
hlstd = vol_highlow_std(high,low,periodname,perioddays)
hlstable = pd.DataFrame(hlavg['vol_highlow_avg_'+periodname]/hlstd['vol_highlow_std_'+periodname],columns = ['vol_highlow_stable_'+periodname])
return hlstable
def vol_upshadow_avg(High,Open,Close,periodname,perioddays):
multiper = (Open>Close).astype(int)
Open_Close_max = multiper*Open + (1-multiper)*Close
upshadow_df = ((High - Open_Close_max)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.mean(),columns = ['vol_upshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_upshadow_std(High,Open,Close,periodname,perioddays):
multiper = (Open>Close).astype(int)
Open_Close_max = multiper*Open + (1-multiper)*Close
upshadow_df = ((High - Open_Close_max)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.std(),columns = ['vol_upshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_upshadow_stable(High,Open,Close,periodname,perioddays):
avg = vol_upshadow_avg(High,Open,Close,periodname,perioddays)
std = vol_upshadow_std(High,Open,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_upshadow_avg_'+periodname]/std['vol_upshadow_std_'+periodname],columns = ['vol_upshadow_stable_'+periodname])
return df
def vol_downshadow_avg(Low,Open,Close,periodname,perioddays):
multiper = (Open<Close).astype(int)
Open_Close_min = multiper*Open + (1-multiper)*Close
downshadow_df = ((Open_Close_min - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.mean(),columns = ['vol_downshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_downshadow_std(Low,Open,Close,periodname,perioddays):
multiper = (Open<Close).astype(int)
Open_Close_min = multiper*Open + (1-multiper)*Close
downshadow_df = ((Open_Close_min - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.std(),columns = ['vol_downshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_downshadow_stable(Low,Open,Close,periodname,perioddays):
avg = vol_downshadow_avg(Low,Open,Close,periodname,perioddays)
std = vol_downshadow_std(Low,Open,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_downshadow_avg_'+periodname]/std['vol_downshadow_std_'+periodname],columns = ['vol_downshadow_stable_'+periodname])
return df
def vol_w_downshadow_avg(Low,Close,periodname,perioddays):
downshadow_df = ((Close - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.mean(),columns = ['vol_w_downshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_downshadow_std(Low,Close,periodname,perioddays):
downshadow_df = ((Close - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.std(),columns = ['vol_w_downshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_downshadow_stable(Low,Close,periodname,perioddays):
avg = vol_w_downshadow_avg(Low,Close,periodname,perioddays)
std = vol_w_downshadow_std(Low,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_w_downshadow_avg_'+periodname]/std['vol_w_downshadow_std_'+periodname],columns = ['vol_w_downshadow_stable_'+periodname])
return df
def vol_w_upshadow_avg(High,Close,periodname,perioddays):
upshadow_df = ((High - Close)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.mean(),columns = ['vol_w_upshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_upshadow_std(High,Close,periodname,perioddays):
upshadow_df = ((High - Close)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.std(),columns = ['vol_w_upshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_upshadow_stable(High,Close,periodname,perioddays):
avg = vol_w_upshadow_avg(High,Close,periodname,perioddays)
std = vol_w_upshadow_std(High,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_w_upshadow_avg_'+periodname]/std['vol_w_upshadow_std_'+periodname],columns = ['vol_w_upshadow_stable_'+periodname])
return df
def liq_turn_avg(tovr_r,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r_df.index[-1]
df = pd.DataFrame(tovr_r_df.mean(),columns = ['liq_turn_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_turn_std(tovr_r,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r_df.index[-1]
df = pd.DataFrame(tovr_r_df.std(),columns = ['liq_turn_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_vstd(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df = ret.iloc[-perioddays:]
df = pd.DataFrame(tovr_r_df.mean()/ret_df.std(),columns = ['liq_vstd_'+periodname])
todaydate = tovr_r_df.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_avg(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df_abs = ret.iloc[-perioddays:].abs()
amihud = ret_df_abs/tovr_r_df
df = pd.DataFrame(amihud.mean(),columns = ['liq_amihud_avg_'+periodname])
todaydate = amihud.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_std(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df_abs = ret.iloc[-perioddays:].abs()
amihud = ret_df_abs/tovr_r_df
df = pd.DataFrame(amihud.std(),columns = ['liq_amihud_std_'+periodname])
todaydate = amihud.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_stable(tovr_r,ret,periodname,perioddays):
avg = liq_amihud_avg(tovr_r,ret,periodname,perioddays)
std = liq_amihud_std(tovr_r,ret,periodname,perioddays)
v = avg['liq_amihud_avg_'+periodname]/std['liq_amihud_std_'+periodname]
df = pd.DataFrame(v,columns = ['liq_amihud_stable_'+periodname])
return df
def liq_shortcut_avg(tovr_r,High,Low,Open,Close,periodname,perioddays):
shortcut = 2*(High - Low) - (Open - Close).abs()
v = shortcut.iloc[-perioddays:]/tovr_r.iloc[-perioddays:]
df = pd.DataFrame(v.mean(),columns = ['liq_shortcut_avg_'+periodname])
todaydate = v.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_shortcut_std(tovr_r,High,Low,Open,Close,periodname,perioddays):
shortcut = 2*(High - Low) - (Open - Close).abs()
v = shortcut.iloc[-perioddays:]/tovr_r.iloc[-perioddays:]
df = pd.DataFrame(v.std(),columns = ['liq_shortcut_std_'+periodname])
todaydate = v.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_shortcut_stable(tovr_r,High,Low,Open,Close,periodname,perioddays):
avg = liq_shortcut_avg(tovr_r,High,Low,Open,Close,periodname,perioddays)
std = liq_shortcut_std(tovr_r,High,Low,Open,Close,periodname,perioddays)
v = avg['liq_shortcut_avg_'+periodname]/std['liq_shortcut_std_'+periodname]
df = pd.DataFrame(v,columns = ['liq_shortcut_stable_'+periodname])
return df
def PLUS(Close, High, Low):
plus = (2*Close - High - Low)/Close.shift(1)
todaydate = plus.index[-1]
df = pd.DataFrame(plus.iloc[-1])
df.columns = ['PLUS']
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_std_w_plus(Close, High, Low, tovr_r,periodname,perioddays):
plus = PLUS(Close, High, Low)
liq_std = liq_turn_std(tovr_r,periodname,perioddays)
plus['PLUS'] = plus['PLUS'].groupby(level = 'date', group_keys=False).apply(lambda x: x-min(0,x.min()))
swp = liq_std['liq_turn_std_'+periodname]*plus['PLUS']
df = pd.DataFrame(swp,columns = ['liq_std_w_plus_'+periodname])
return df
def tvr_std(tovr_r,periodname,perioddays):
df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r.index[-1]
fc = pd.DataFrame(df.std())
fc.columns = ['tvr_std_'+periodname]
fc['date'] = todaydate
fc = fc.reset_index().set_index(['date','symbol'])
return fc.sort_index()
def HL_Sprd(close,high,low,perioddays):
todaydate = close.index[-1]
sprd = (high/low - 1).iloc[-perioddays:]
close_ = close.iloc[-perioddays:]
phigh = close_.apply(lambda x: x>x.quantile(0.75)).astype(int).replace(0,np.nan)
plow = close_.apply(lambda x: x<x.quantile(0.25)).astype(int).replace(0,np.nan)
vhigh = pd.DataFrame((sprd*phigh).mean())
vlow = pd.DataFrame((sprd*plow).mean())
vlow['date'],vhigh['date'] = todaydate,todaydate
vhigh = vhigh.set_index('date',append=True).swaplevel()
vlow = vlow.set_index('date',append=True).swaplevel()
hlsprd = vhigh-vlow
hlsprd.columns = ['HL_Sprd']
return hlsprd.dropna()
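# Reading aid (comment only): HL_Sprd averages the daily high/low amplitude separately over
# the days on which a stock's close sits above its own 75th percentile and below its own
# 25th percentile within the window, then reports the difference of the two averages
# (high-price amplitude minus low-price amplitude).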
def corr_price_turn(timerange,pricebyday,periodname):
price = pricebyday.loc[timerange]
fc = price[['close','turnover_ratio']].groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['close']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_price_turn_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def corr_ret_turn_post(timerange,pricebyday,periodname):
pricedf = pricebyday.loc[timerange]
pricedf['turnover_ratio'] = pricedf['turnover_ratio'].unstack().sort_index().shift(1).stack() # post
pricedf['ret'] = pricedf['close'].unstack().sort_index().pct_change().stack()
fc = pricedf[['ret','turnover_ratio']].groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['ret']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_ret_turn_post_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def corr_ret_turnd(timerange,pricebyday,periodname):
pricedf = pricebyday.loc[timerange]
pricedf['turnover_ratio_pct'] = pricedf['turnover_ratio'].unstack().sort_index().pct_change().stack()
pricedf['ret'] = pricedf['close'].unstack().sort_index().pct_change().stack()
fc = pricedf[['ret','turnover_ratio_pct']].dropna().groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['ret']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_ret_turnd_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def mts(sta_del_extrm,minbar,todaydate):
mts = sta_del_extrm[['single_trade_amt']]
mts['total_turnover'] = minbar['total_turnover']
mts = mts.groupby(level = 'symbol').corr()[::2]['total_turnover'].droplevel(1)
mts= pd.DataFrame(mts)
mts.columns = ['mts']
mts['date'] = todaydate
mts = mts.reset_index().set_index(['date','symbol'])
return mts
def mte(sta_del_extrm,minbar,todaydate):
mte = sta_del_extrm[['single_trade_amt']]
mte['close'] = minbar['close']
mte = mte.groupby(level = 'symbol').corr()[::2]['close'].droplevel(1)
mte= pd.DataFrame(mte)
mte.columns = ['mte']
mte['date'] = todaydate
mte = mte.reset_index().set_index(['date','symbol'])
return mte
def qua(sta_del_extrm,todaydate):
qua = sta_del_extrm.groupby(level = 'symbol').\
apply(lambda x: (x['single_trade_amt'].quantile(0.1)-\
x['single_trade_amt'].min())/(x['single_trade_amt'].max()-x['single_trade_amt'].min()))
qua = pd.DataFrame(qua,columns = ['qua'])
qua['date'] = todaydate
qua = qua.reset_index().set_index(['date','symbol'])
qua.index.name = ('date','symbol')
return qua
def skew(sta_50pct,todaydate):# skewness factor
skew = sta_50pct.groupby(level = 'symbol').\
apply(lambda x: (((x['single_trade_amt']-x['single_trade_amt'].mean())/x['single_trade_amt'].std())**3).mean())
skew = pd.DataFrame(skew,columns = ['skew'])
skew['date'] = todaydate
skew = skew.reset_index().set_index(['date','symbol'])
skew.index.name = ('date','symbol')
return skew
def s_reverse(sing_trade_amt,minbar,todaydate):# strong reversal factor
minute_r = sing_trade_amt.copy()
minute_r['minute_r'] = minbar['close']/minbar['open'] - 1
minute_r = minute_r.set_index('trading_date',append = True)
s_reverse = minute_r.groupby(level = 0).\
apply(lambda x: x[x.single_trade_amt > x.single_trade_amt.quantile(0.8)].minute_r.sum())
s_reverse = pd.DataFrame(s_reverse,columns = ['s_reverse'])
s_reverse['date'] = todaydate
s_reverse = s_reverse.reset_index().set_index(['date','symbol'])
s_reverse.index.name = ('date','symbol')
return s_reverse
def daily_sta_90pct(sta_del_extrm):# daily 90th percentile of single-trade turnover amount
daily_sta = sta_del_extrm.set_index('trading_date',append = True).rename_axis(index = {'trading_date':'date'})
daily_sta_90pct = daily_sta.droplevel('datetime').groupby(level = 'symbol').apply(lambda x: x.groupby(level = 1).quantile(0.9)).reset_index().set_index(['date','symbol'])
daily_sta_90pct.columns = ['daily_sta_90pct']
return daily_sta_90pct
def ideal_reverse(daily_sta_cal,Close):
daily_sta_cal['day_return'] = Close.pct_change().stack()
by_stock = list(daily_sta_cal.groupby(level = 1))
def apply_rolling_cal(rollingdata):
if len(rollingdata.index)<20:
return
else:
temp = rollingdata.sort_values('daily_sta_90pct')
returndf = rollingdata.iloc[-1:].copy()
returndf['ideal_reverse'] = temp.iloc[:10].day_return.sum() - temp.iloc[10:].day_return.sum()
return returndf['ideal_reverse']
ideal_reverse = list(map(lambda x:apply_rolling_cal(x[1]),by_stock))
ideal_reverse = pd.concat(ideal_reverse)
ideal_reverse = pd.DataFrame(ideal_reverse)
ideal_reverse.columns = ['ideal_reverse']
return ideal_reverse
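# Reading aid (comment only): for each stock, ideal_reverse sorts the trailing 20 sessions
# by the daily 90th-percentile single-trade amount and reports the summed daily return of
# the 10 "calm" days minus that of the 10 "hot" days, i.e. a reversal signal conditioned
# on trade-size intensity; windows shorter than 20 sessions are skipped.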
|
Alpha-Planet
|
/Alpha_Planet-0.0.2-py3-none-any.whl/Alpha_Planet/Factor_Def_and_Get_Method.py
|
Factor_Def_and_Get_Method.py
|
import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
from tqdm import *
class single_signal_test(object):
def __init__(self) -> None:
pass
def cal_turnover(self,df,ndays):
# holdings:
# pd.Series
# multiindex: timestamp,code
# all values are 1
holdings = df.copy()
holdings = holdings.unstack().dropna(how ='all',axis = 1)
holdings = holdings.apply(lambda x: x/x.sum(),axis = 1)
holdings = holdings.fillna(0)
return (holdings.diff(ndays).abs().sum(axis = 1)/2)
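# Worked note (hypothetical numbers, added for clarity): with four equally weighted names
# on one date and one of them replaced `ndays` later, the absolute weight changes are
# |0-0.25| + 0 + 0 + 0 + |0.25-0| = 0.5, halved to 0.25, i.e. 25% one-sided turnover,
# which is the per-date value this method returns.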
def cal_holdingnums(self,df):
# holdings:
# pd.Series
# multiindex: timestamp,code
# all values are 1
holdings = df.copy()
holdings = holdings.groupby(level = 0).sum()
return holdings
def one_factor_grouper(self,df,factorname,quantiles,qcut): # quantile grouping
# concatdf:pd.DataFrame
# factorname: str
# multiindex: timestamp,code
# columns: nday_return, factorname1, factorname2...
concatdf = df[[factorname]].copy().round(6)# round to a fixed number of decimals!
concatdf[factorname+'_rank'] = concatdf[factorname].groupby(level = 'date', group_keys = False).rank()
if qcut:
concatdf[factorname+'_quantile'] =concatdf[factorname+'_rank'].dropna().groupby(level = 'date', group_keys = False).apply(lambda x: pd.qcut(x,quantiles,labels=list(range(1,quantiles+1)))).astype(int)
else:
concatdf[factorname+'_quantile'] =concatdf[factorname+'_rank'].dropna().groupby(level = 'date', group_keys = False).apply(lambda x: pd.cut(x,quantiles,labels=list(range(1,quantiles+1)))).astype(int)
return concatdf
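# Sketch of the expected input/output (an assumption about typical use, not an API contract):
# `df` is a ['date','symbol']-indexed frame containing `factorname`. The method appends
# '<factorname>_rank' (the cross-sectional rank within each date) and '<factorname>_quantile'
# (an integer 1..quantiles, taken from pd.qcut on the per-date ranks when qcut=True, i.e.
# equal-count bins, otherwise from pd.cut, i.e. equal-width bins over the rank range).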
def one_factor_return(self,df,factorname,ndays,return_col,w_method,demean = False): # compute per-quantile returns
if w_method =='average':
qreturn = df.groupby(level = 'date', group_keys = True).apply(lambda x: x.groupby(factorname+'_quantile')[[return_col]].mean()/ndays).unstack()
qreturn.columns = [i[1] for i in list(qreturn)]
if w_method == 'factor_weighted':
tmpdf = df.copy()
tmpdf['rwf'] = tmpdf[return_col]*tmpdf[factorname]
tmpdf.dropna(subset = ['rwf'],inplace = True)
qreturn = tmpdf.groupby(level = 'date', group_keys = True).\
apply(lambda x: x.groupby(factorname+'_quantile').\
apply(lambda x: x['rwf'].sum()/x[factorname].sum() if x[factorname].sum()>0 else 0)/ndays)
# qreturn = tmpdf.groupby(level = 'date', group_keys = True).\
# apply(lambda x: x.groupby(factorname+'_quantile').\
# apply(lambda x: (x[return_col]*x[factorname]).sum()/x[factorname].sum())/ndays)
if w_method =='cap_weighted':
qreturn = df.groupby(level = 'date', group_keys = True).\
apply(lambda x: x.groupby(factorname+'_quantile').\
apply(lambda x: (x[return_col]*x['cap']).sum()/x['cap'].sum())/ndays)
if len(qreturn.index.names)==1:
pass
else:
qreturn= qreturn.unstack().apply(lambda x: x.fillna(x.mean()),axis = 1)
if demean:
qreturn = qreturn.apply(lambda x: x-x.mean(),axis = 1)
return qreturn
def one_factor_icir(self,df,factorname,return_col):
from scipy import stats
ic = df.groupby(level = 'date').apply(lambda x: x[[return_col,factorname]].corr('spearman'))
ic_org = ic[ic.index.get_level_values(1) ==return_col][factorname].dropna()
ictable = ic_org.describe()[['count','mean','std','min','max']].copy()
ictable['risk_adj'] = ic_org.mean()/ic_org.std()
ictable['skew'] = ic_org.skew()
ictable['kurtosis'] = ic_org.kurtosis()
if ictable['mean'] <0:
ictable['p-value'] = stats.ttest_1samp(ic_org,0,alternative='less').pvalue
else:
ictable['p-value'] = stats.ttest_1samp(ic_org,0,alternative='greater').pvalue
return ictable
def one_factor_ret_sharp(self,qreturn,ret_freq):
return qreturn.mean()/qreturn.std()*np.sqrt(252/ret_freq)
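# Note (reading aid): qreturn holds per-day average quantile returns produced by
# one_factor_return (period returns divided by ndays), so mean/std * sqrt(252/ret_freq)
# annualizes the Sharpe ratio by treating consecutive ret_freq-day holding windows as
# independent observations.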
def factor_prepare(self,allfactors,fc,quantiles,qcut):
test_fc = allfactors[[fc]].copy().rename_axis(['date','symbol'])
res_df = self.one_factor_grouper(test_fc,fc,quantiles,qcut)# ordinal (rank) standardization
return res_df
def factor_ret_test_sheet(self,
weight_method,
index_price, #'''benchmark dates are timestamps'''
fcname,
res_df,
Price,
days,
savedir,
demean = False):
from alphalens import utils
plottools = plot_tools()
mate_al = mate_alphalens()
tov_df = res_df.groupby(by = fcname+'_quantile').apply(lambda x: self.cal_turnover(x[fcname+'_quantile']/x[fcname+'_quantile'],days))
if len(tov_df.index.names)==1:
to = tov_df.mean(axis =1)
else:
to = tov_df.unstack().mean(axis = 1)
clean_factor,price = mate_al.index_mate(res_df.dropna(),Price)
fwr = utils.compute_forward_returns(price.stack(),price)
clean_factor[str(days)+'D'] = fwr[str(days)+'D']
clean_factor = clean_factor.reset_index()
clean_factor['date'] = clean_factor['date'].astype(str)
clean_factor = clean_factor.set_index(['date','asset']).dropna()
if index_price is not None:
clean_factor = mate_al.trans_ex_return(clean_factor,index_price,ret_col=[str(days)+'D'])
else:
clean_factor[str(days)+'D'] = clean_factor.groupby(level = 'date',group_keys = False).apply(lambda x: x[str(days)+'D']-x[str(days)+'D'].mean())
qreturn = self.one_factor_return(clean_factor,fcname,days,str(days)+'D',w_method = weight_method,demean=demean)
# ic
ic_table = self.one_factor_icir(clean_factor,fcname,str(days)+'D')
indicators = self.judge_material(qreturn,ic_table,days)
plottools.factor_plt(qreturn,to,indicators,fcname,days,savedir)
return qreturn,clean_factor,indicators
def judge_material(self,qreturn,ic_table,days):
from scipy import stats
indicators = ic_table.copy()
maxquantile = max(qreturn.columns)
lsret = qreturn[maxquantile] - qreturn[1]
groupmean = qreturn.mean(axis = 0)
groupmean_diff = groupmean.diff().dropna()
top_half = groupmean_diff.iloc[-5:]
top_sharp = qreturn[maxquantile].mean()/qreturn[maxquantile].std()*pow(252/days,1/2)
t,p_lsret =stats.ttest_1samp(lsret,0,alternative='greater')
t,p_groupmean =stats.ttest_1samp(groupmean_diff,0,alternative='greater')
t,p_tophalfmean = stats.ttest_1samp(top_half,0,alternative='greater')
indicators['TopQtl_SR'] = top_sharp
indicators['LSRet_pvalue'] = p_lsret
indicators['MeanRetDiff_pvalue'] = p_groupmean
indicators['TophalfMeanRetDiff_pvalue'] = p_tophalfmean
return indicators
def efficient_judge(self,indicators):
from scipy import stats
# factor screening
'''
Tests:
factors that pass both checks are considered useful
'''
if indicators['p-value']<= 0.05 and indicators['TopQtl_SR']>=1:
if indicators['TophalfMeanRetDiff_pvalue']<=0.3 and indicators['LSRet_pvalue']<=0.12:
# print(fc+' is useful; top-{}-group mean-return first-difference p-value {}, long-short return p-value {}, ic_pvalue {}, top-group excess Sharpe {}'.format(int(maxquantile/2),p_top_halfmean,p_lsret))
return 1
# and the product of the two should match the original threshold; and the other one must not be too poor
# elif indicators['TophalfMeanRetDiff_pvalue']*indicators['LSRet_pvalue']<0.0025 and (indicators['TophalfMeanRetDiff_pvalue']/0.05 <= 0.1 or indicators['LSRet_pvalue']/0.05 <= 0.1) \
# and min(indicators['MeanRetDiff_pvalue'],indicators['TophalfMeanRetDiff_pvalue'])<=0.05 and indicators['TophalfMeanRetDiff_pvalue']<0.3:
# print(fc+' is barely useful; top-{}-group mean-return first-difference p-value {}, overall mean-return first-difference p-value {}, long-short return p-value {}'.format(int(maxquantile/2),p_top_halfmean,p_groupmean,p_lsret))
else:
return 2
return 0
def eff_classification(self,fc,indicator,judgefunc,strict_eff,unstrict_eff):
'''
Input:
factor matrix
Output:
1. factor test results
2. noise factors
'''
# factor screening
if judgefunc(indicator) == 1:
strict_eff.append(fc)
unstrict_eff.append(fc)
elif judgefunc(indicator) == 2:
unstrict_eff.append(fc)
return strict_eff,unstrict_eff
class multi_factor_test(object):
def __init__(self) -> None:
self.sst = single_signal_test()
pass
def factors_abnormal_ret(self,factordf,return_col,factorlist,days,index_price = None,pricedf = None,longshort_return = False):
df = factordf.copy()
if pricedf is not None:
# by default, the next close divided by today's close
df[str(days)+'D'] = pricedf.pct_change(days,fill_method = None).shift(-days).stack()
if index_price is not None:
ml = mate_alphalens()
df,pricedf = ml.index_mate(df,pricedf)
df = ml.trans_ex_return(df,index_price,str(days)+'D')
df = df.rename(columns = {str(days)+'D':return_col+str(days)+'D'}).dropna(subset = return_col+str(days)+'D')
if longshort_return == False:
ret_k = df.groupby(level = 'date',group_keys = False).apply(lambda x: sm.formula.ols(return_col+str(days)+'D'+'~'+'+'.join(factorlist),data = x).fit().params)
del ret_k['Intercept']
else :
lscol = list(factordf)
quantiles = int(df[return_col+str(days)+'D'].groupby(level = 'date').count().mean()//100)
LSretList = []
for col in tqdm(lscol):
tmpdf = df.copy()
tmpdf[col+'_quantile'] = self.sst.one_factor_grouper(df,col,quantiles,False)[col+'_quantile']
qreturn = self.sst.one_factor_return(tmpdf,col,days,'ret{}D'.format(days),'factor_weighted',False)
LSretList.append(qreturn[max(list(qreturn))] - qreturn[1])
ret_k = pd.concat(LSretList,axis = 1)
ret_k.columns = lscol
return ret_k
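# Reading aid (comment only): with longshort_return=False this runs one cross-sectional OLS
# per date, regressing the forward return on all columns in `factorlist`, so ret_k is the
# daily factor-return series (the regression coefficients, intercept dropped). With
# longshort_return=True it instead reports, per factor, the top-minus-bottom quantile
# return series built from one_factor_grouper / one_factor_return.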
def multif_barra_norm(self,allfactors,Bft):
df = allfactors.copy()
print('Barra neutralization....')
for fcname in tqdm(list(df)):
test_fc = df[[fcname]].copy().rename_axis(['date','symbol'])
residual_ols,params_ols = Bft.barra_compose(test_fc)
df[fcname] = residual_ols # replace the raw factor with its neutralized residual
return df
def multif_industry_norm(self,allfactors,industry_info):
df = allfactors.copy()
df['first_industry_name'] = industry_info
df = df.dropna(subset = 'first_industry_name').groupby(level = 'date',group_keys =False).apply(lambda x: x.groupby(by = 'first_industry_name',group_keys =False).apply(lambda x:x-x.mean(numeric_only=True)))
del df['first_industry_name']
return df
def multif_corr_ana(self,df,factornamelist): # multi-factor correlation analysis
# df:pd.DataFrame
# factornamelist: strlist
# multiindex: timestamp,code
# columns: nday_return, factorname1, factorname2...
df_ana = df[factornamelist].groupby(level = 'date').corr()
corr_mean = df_ana.groupby(level = 1).mean() # the second index level of the corr output is unnamed, so level 1 is used; that level holds the factor names
corr_ir = df_ana.groupby(level = 1).mean()/df_ana.groupby(level = 1).std()
return corr_mean.loc[list(corr_mean)],corr_ir.loc[list(corr_ir)]
def multif_pca_ana(self,originalFactor,domain_factor_nums): # multi-factor PCA analysis
# originalFactor: pd.DataFrame
# multiindex: timestamp,code
# columns: factorname1, factorname2...
from sklearn import preprocessing
data = originalFactor.groupby(level = 'date', group_keys = False).apply(lambda x: preprocessing.scale(x))
data = np.vstack(data.values)
from sklearn.decomposition import PCA
pcaModel = PCA(domain_factor_nums)
pcaModel.fit(data)
pcaFactors = pcaModel.transform(data)
pcaFactors = pd.DataFrame(pcaFactors)
pcaFactors.index = originalFactor.index
pcaFactors.columns = ['pca_'+str(i) for i in range(domain_factor_nums)]
return pcaModel.explained_variance_,pcaModel.explained_variance_ratio_,pcaFactors
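# Usage sketch (illustrative; `factor_panel` is a hypothetical ['date','symbol']-indexed
# DataFrame of factor columns):
#
#   mft = multi_factor_test()
#   var_, var_ratio, pca_factors = mft.multif_pca_ana(factor_panel, domain_factor_nums=3)
#   # var_ratio shows how much variance of the date-wise standardized data each component
#   # explains; pca_factors keeps the original MultiIndex with columns 'pca_0'..'pca_2'.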
def batch_factors_test(self,weight_method,allfactors,Price,quantiles,days,qcut,savedir,index_price = None,demean = False):
returndict = {}
sst = single_signal_test()
for fc in tqdm(list(allfactors)):
res_df = sst.factor_prepare(allfactors,fc,quantiles,qcut)
sst.factor_ret_test_sheet(weight_method,index_price,fc,res_df,Price,days,savedir,demean)
returndict[fc] = res_df[[fc]]
return returndict
def multif_tsstable_test(self,originalData):
# originalFactor: pd.DataFrame
# multiindex: timestamp,code
# columns: factorname1, factorname2...
from statsmodels.tsa.stattools import adfuller
data = originalData.copy()#.groupby(level = 0).apply(lambda x: (x-x.mean())/x.std()) do not standardize again!!
mean_pvalue = data.groupby(level = 'date').apply(lambda x:x.mean()).apply(lambda x: adfuller(x)[1])
std_pvalue = data.groupby(level = 'date').apply(lambda x:x.std()).apply(lambda x: adfuller(x)[1])
skew_pvalue = data.groupby(level = 'date').apply(lambda x:x.skew()).apply(lambda x: adfuller(x)[1])
kurt_pvalue = data.groupby(level = 'date').apply(lambda x:x.kurt()).apply(lambda x: adfuller(x)[1])
yarn_pvalue = pd.concat([mean_pvalue,std_pvalue,skew_pvalue,kurt_pvalue],axis = 1)
yarn_pvalue.columns = ['mean','std','skew','kurt']
return yarn_pvalue
def del_updown_limit(self,factordf,daybar,text):
# drop limit-up / limit-down observations
notuplimit = daybar[~(daybar[text] == daybar.limit_up)]
notdownlimit = daybar[~(daybar[text] == daybar.limit_down)]
factordf = factordf[factordf.index.isin(notuplimit.index)]
factordf = factordf[factordf.index.isin(notdownlimit.index)]
return factordf
def in_some_pool(self,df,pool_components):
factordf = df.copy()
factordf['inpool']=pool_components.applymap(lambda x:1)
factordf['inpool'] = factordf['inpool'].apply(lambda x: 1 if x>0 else 0)
testdf = factordf[factordf['inpool']>=1]
del testdf['inpool']
return testdf
def orthog(self,factor_mat, y, xlist):
df = factor_mat.replace([np.inf, -np.inf], np.nan).dropna()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
params = regre.params[~(regre.params.index == 'Intercept')]
intercept = regre.params[(regre.params.index == 'Intercept')]
residual = df[y] - (df[list(params.index)]*params).sum(axis = 1) - intercept.values
residual = pd.DataFrame(residual)
residual.columns = [y]
return self.mat_normlize(residual),params
def mat_orthog(self,factor_mat):
temp1 = factor_mat.replace([np.inf, -np.inf], np.nan).dropna()
for i in list(temp1):
no = list(temp1).index(i)
if no==0:
temp1[i] = self.mat_normlize(temp1[i])
continue
fclist = list(filter(lambda x: x!=i,list(temp1)[:no]))
temp1[i] = self.orthog(temp1,i,fclist)[0]
return temp1
def ts_mat_orthog(self,factor_mat):
return factor_mat.groupby(level = 'date',group_keys = False).apply(self.mat_orthog)
def mat_normlize(self,factor_mat):
df = factor_mat.rename_axis(['date','symbol']).replace([np.inf, -np.inf], np.nan)
def norm(x):
return (x - x.min())/(x.max()-x.min())
return df.groupby(level = 'date',group_keys = False).apply(norm)
def mat_ranknormlize(self,factor_mat):
df = factor_mat.rename_axis(['date','symbol']).replace([np.inf, -np.inf], np.nan)
def norm(x):
x_rank = x.rank()
return (x_rank - x_rank.min())/(x_rank.max()-x_rank.min())
return df.groupby(level = 'date',group_keys = False).apply(norm)
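# Worked note (hypothetical values, added for clarity): for one date with factor values
# [100, 1, 2], mat_normlize (min-max) gives [1.0, 0.0, ~0.01], while mat_ranknormlize first
# ranks them to [3, 1, 2] and rescales the ranks to [1.0, 0.0, 0.5]; the rank version is
# therefore insensitive to outliers in the raw values.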
def multindex_shift(self,fcdf):
df = fcdf.reset_index()
datelist = list(df['date'].drop_duplicates())
datedict = dict(zip(datelist[:-1],datelist[1:]))
df['date'] =df['date'].apply(lambda x: datedict[x] if x in datedict.keys() else np.nan)
return df.dropna(subset = 'date').set_index(['date','symbol'])
class Barra_factor_ana(object):
'''
1. growth requires at least 504 days of data; stocks without enough history are dropped when the factors are concatenated together
2. barrafactor must carry a two-level index, with dates on the first level and instruments on the second
'''
def __init__(self,df=None,start_date=None,end_date=None,dir=None,skip_fileload=None) -> None:
# preload data
if not skip_fileload:
self.price = df
dailyreturn = df/df.shift(1)-1
dailyreturn.dropna(how = 'all',inplace=True)
self.returndata = dailyreturn
self.start_date = start_date
self.end_date = end_date
import os
filelist = os.listdir(dir)
self.filedict = {}
for f in filelist:
if f[-3:]=='csv':
self.filedict[f[:-4]] = pd.read_csv(dir+f,index_col = [0,1])
pass
def rise_barra_factors(self):
print('rise size')
self.size = np.log(self.filedict['market_cap']).dropna()
def OLSparams(y,x):
print('rise beta')
X_ = x.droplevel('order_book_id')
df = y.copy()
df['market_r'] = X_['r']
df.dropna(subset = 'market_r',inplace = True)
dflist = list(df.rolling(252))[252:]
paramslist = []
for olsdf in dflist:
mod = sm.OLS(olsdf,sm.add_constant(olsdf['market_r']))
re = mod.fit()
params = re.params.T
params.index = olsdf.columns
params = params[params.index!='market_r']
params['date'] = olsdf.index[-1]
params = params.rename(columns = {'market_r':'beta'})
paramslist.append(params)
olsparams = pd.concat(paramslist).set_index('date',append=True).unstack().T
constdf = olsparams.loc['const'].ewm(halflife = 63,ignore_na = True,adjust = False).mean().stack()
betadf = olsparams.loc['beta'].ewm(halflife = 63,ignore_na = True,adjust = False).mean().stack()
# cal residual
mkt_df = pd.concat([X_['r']]*len(list(betadf.unstack())),axis = 1)
mkt_df.columns = list(betadf.unstack())
residual = y - betadf.unstack()*mkt_df - constdf.unstack() # this residual is already based on the EWM-smoothed beta and const, so no further EWM is needed
return {'beta':betadf,'const':constdf,'residual':residual}
def MOMTM(y):
df = np.log(1+y)
momtm = df.ewm(halflife=126,ignore_na = True,adjust = False).mean()#.iloc[-1:]
return momtm
def CMRA(y,T):
date = y.index[-1]
dflist= []
for i in range(1,T+1):
pct_n_month = pd.DataFrame((y/y.shift(21*i)-1).iloc[-1])/21
dflist.append(pct_n_month)
df = pd.concat(dflist,axis =1)
zmax = df.max(axis =1)
zmin = df.min(axis = 1)
cmra = pd.DataFrame(np.log(1+zmax)-np.log(1+zmin),columns = [date]).T
return cmra
def orthog(barrafactor,y,xlist):
df = barrafactor.copy()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
for p in xlist:
df[p]*= regre.params[p]
df[y+'_orth'] = df[y] - df[xlist].sum(axis = 1)-regre.params['Intercept']
return df[[y+'_orth']]
# beta
self.olsparams = OLSparams(self.returndata,self.filedict['market_r'])
self.beta = pd.DataFrame(self.olsparams['beta']).dropna()
self.beta.columns = ['beta']
# momentum
print('rise momentum')
# retroll504 = list(self.returndata.rolling(504))[504:]
# self.momtm = pd.concat(list(map(lambda x: MOMTM(x),retroll504))).shift(21).dropna(how = 'all')
self.momtm = MOMTM(self.returndata).shift(21).dropna(how = 'all')
self.momtm = pd.DataFrame(self.momtm.stack(),columns=['momentum'])
# residual volatility
print('rise residual volatility')
self.hist_volatility = self.returndata.ewm(halflife = 42,ignore_na = True,adjust = False).std().dropna(how = 'all')
CMRAlist = list(self.price.rolling(252))[252:]
self.CMRA = pd.concat(list(map(lambda x: CMRA(x,12),CMRAlist)))
self.Hsigma = self.olsparams['residual'].rolling(252,min_periods = 1).std()
self.residual_volatility = pd.DataFrame((self.hist_volatility*0.74+self.CMRA*0.16+self.Hsigma*0.1).stack()).dropna()
self.residual_volatility.columns = ['residual_volatility']
# non-linear size
print('rise non-linear size')
self.nlsize = (self.size**3).dropna()
self.nlsize.columns = ['nlsize']
# Bp
print('rise Bp')
self.Bp = self.filedict['Bp'].dropna()
# liquidity
print('rise Liquidity')
self.tvrdf = self.filedict['turnover']
self.liq_1m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(22,min_periods =1).mean())
self.liq_3m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(74,min_periods =1).mean())
self.liq_12m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(252,min_periods =1).mean())
self.liq = (0.35*self.liq_1m + 0.35*self.liq_3m + 0.3*self.liq_12m).dropna()
print('rise Earning Yield')
self.earning_yield = pd.concat([self.filedict['Ep'],self.filedict['Sp']],axis = 1)
self.earning_yield['earning_yield'] = self.earning_yield['ep_ratio_ttm']*0.66+self.earning_yield['sp_ratio_ttm']*0.34
self.earning_yield = self.earning_yield[['earning_yield']].dropna()
# growth
print('rise growth')
NP = self.filedict['NPGO'].unstack()
NP = (NP-NP.shift(504))/NP.shift(504).abs().replace(0,np.nan)
NP = NP.stack()
RVN = self.filedict['RGO'].unstack()
RVN = (RVN - RVN.shift(504))/RVN.shift(504).abs().replace(0,np.nan)
RVN = RVN.stack()
self.growth = pd.DataFrame(NP['net_profit_parent_company_ttm_0']*0.34+RVN['revenue_ttm_0']*0.66)
self.growth.columns = ['growth']
self.growth.dropna(inplace=True)
# leverage
print('rise leverage')
self.leverage = self.filedict['MLEV']['du_equity_multiplier_ttm']*0.38+self.filedict['DTOA']['debt_to_asset_ratio_ttm']*0.35+self.filedict['BLEV']['book_leverage_ttm']*0.27
self.leverage = pd.DataFrame(self.leverage)
self.leverage.columns = ['leverage']
self.leverage.dropna(inplace=True)
# concat
self.barrafactor = pd.concat([
self.size,
self.beta,
self.momtm,
self.residual_volatility,
self.nlsize,
self.Bp,
self.liq,
self.earning_yield,
self.growth,
self.leverage],axis = 1).sort_index(level = 0)
'''orthogonalization'''
# the raw, non-orthogonalized factors are already stored as instance attributes and can be used directly
print('Orthogonalizing....')
y = ['residual_volatility','nlsize','turnover']
xlist = ['circulation_A','beta']
# dropna is required here, otherwise an error is raised
self.barrafactor[y[0]] = self.barrafactor[[y[0]]+xlist].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[0],xlist))
self.barrafactor[y[1]] = self.barrafactor[[y[1]]+xlist[:1]].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[1],xlist[:1]))
self.barrafactor[y[2]] = self.barrafactor[[y[2]]+xlist[:1]].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[2],xlist[:1]))
# standardization
def return_barra_factor(self,rank_normalize:bool):
mft = multi_factor_test()
if rank_normalize:
return mft.mat_ranknormlize(self.barrafactor)
else:
return mft.mat_normlize(self.barrafactor)
def barra_compose(self,factordata):
# the factor values are rank data
decompose = pd.concat([self.barrafactor,factordata],axis = 1).dropna().rename_axis(['date','symbol'])
def orthog(barrafactor,y,xlist):
df = barrafactor.copy()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
params = regre.params[~(regre.params.index == 'Intercept')]
intercept = regre.params[(regre.params.index == 'Intercept')]
residual = df[y] - (df[list(params.index)]*params).sum(axis = 1) - intercept.values
return residual,params
# this approach gives wrong results when only a single day is computed
# residual_ols =decompose.groupby(level = 0).apply(lambda x: orthog(x,list(decompose)[-1],list(decompose)[:-1])[0]).droplevel(0)
# params_ols =decompose.groupby(level = 0).apply(lambda x: orthog(x,list(decompose)[-1],list(decompose)[:-1])[1])
# return residual_ols,params_ols
decomposebyday = list(decompose.groupby(level = 'date'))
residual_olslist = []
params_olslist = []
for df in decomposebyday:
x = df[1]
residual_ols,params_ols = orthog(x,list(decompose)[-1],list(decompose)[:-1])
residual_olslist.append(residual_ols)
params_olslist.append(pd.DataFrame(params_ols,columns = [df[0]]).T)
return pd.concat(residual_olslist),pd.concat(params_olslist)
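# Reading aid (comment only): barra_compose runs one OLS per date, regressing the tested
# factor (the last column of `decompose`) on the Barra style factors, and returns the
# stacked residual series plus a date-indexed frame of style loadings; the residual is the
# style-neutralized factor consumed by multif_barra_norm above.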
def barra_style_pool(self,style,cutnum):
bystyle = self.barrafactor[[style]].copy()
bystyle[style+'_group'] = bystyle[style].dropna().groupby(level = 0,group_keys=False).apply(lambda x: pd.cut(x,cutnum,labels=list(range(1,cutnum+1))))
return bystyle
def factor_performance_bystyle(self,factordata,factorname,style,cutnum):
# even if a factor shows no tilt on a style, its performance can still differ across style groups
bystyle = pd.concat([factordata,self.barrafactor[[style]]],axis = 1)
bystyle[style+'_group'] = bystyle[style].dropna().groupby(level = 0,group_keys=False).apply(lambda x: pd.cut(x,cutnum,labels=list(range(1,cutnum+1))))
ic_daily = bystyle.groupby(style+'_group',group_keys=False).apply(lambda x: x[[factorname,'nday_return']].groupby(level = 0).apply(lambda x: x.corr('spearman').iloc[0,1])).T
return ic_daily
class AutoMatic(object):
sst = single_signal_test()
mft = multi_factor_test()
def __init__(self,Bft,base_index,Price,quantiles,days,qcut,demean,weighted_method) -> None:
'''base_index: benchmark prices whose date index is a timestamp'''
self.Bft = Bft
self.Price = Price.copy()
self.base_index = base_index
self.quantiles = quantiles
self.days = days
self.qcut = qcut
self.demean = demean
self.weighted_method = weighted_method
pass
def AutoMatic_DirCheck(self,path):
if not os.path.exists(path):
os.makedirs(path)
def AutoMatic_Direc_Adjust(self,factors,dir_):
neu_factors = factors.copy()
direction_dict = {}
strict_eff = []
unstrict_eff = []
self.AutoMatic_DirCheck(dir_+'direction/')
self.AutoMatic_DirCheck(dir_+'direction/redirection/')
for fc in list(neu_factors):
res_df = self.sst.factor_prepare(neu_factors,fc,self.quantiles,self.qcut)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,tmp,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,fc,res_df,self.Price,self.days,dir_+'/direction/',self.demean)
if qreturn[list(qreturn)[0]].sum()<= qreturn[self.quantiles].sum():
print(fc+' is a positive-direction factor')
direction_dict[fc] = 1
if qreturn[list(qreturn)[0]].sum() > qreturn[self.quantiles].sum():
print(fc+' is a negative-direction factor')
neu_factors = neu_factors.copy()
neu_factors[fc]=self.mft.mat_normlize(-1*neu_factors[fc])
direction_dict[fc] = -1
res_df = self.sst.factor_prepare(neu_factors,fc,self.quantiles,self.qcut)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,tmp,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,fc,res_df,self.Price,self.days,dir_+'direction/redirection/',self.demean)
# effectiveness screening
strict_eff,unstrict_eff = self.sst.eff_classification(fc,indicator,self.sst.efficient_judge,strict_eff,unstrict_eff)
return direction_dict,strict_eff,unstrict_eff
def AutoMatic_Factor_Merge_Ret(self,neu_factors,base_factors,mergename,dir_):
base_f = pd.DataFrame(neu_factors[base_factors].sum(axis = 1),columns = [mergename])
res_df = self.sst.factor_prepare(base_f,mergename,self.quantiles,self.qcut)
self.AutoMatic_DirCheck(dir_)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,clean_factor,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,mergename,res_df,self.Price,self.days,dir_,self.demean)
return qreturn,clean_factor
def AutoMatic_Compare_Indicator(self,qreturn,reverse):
if reverse:
maxq = min(list(qreturn))
base_perf_sr = -1*qreturn[maxq].mean()/qreturn[maxq].std()
return base_perf_sr
maxq = max(list(qreturn))
base_perf_sr = qreturn[maxq].mean()/qreturn[maxq].std()
return base_perf_sr
def threads_pool_run(self,params_batch):
InSampleFactors,i,dir_ = params_batch[0] , params_batch[1] , params_batch[2]
import matplotlib
matplotlib.use('agg')
savedir = dir_+'{}/'.format(i)
direction_dict,strict_eff,unstrict_eff =self.AutoMatic_Direc_Adjust(InSampleFactors,savedir) # 方向调整
return (direction_dict,strict_eff,unstrict_eff)
def AutoMatic_Stochastic_Optimizer(self,test_factor,threads_num,dir_):
dateset = list(set(test_factor.index.get_level_values('date')))
import multiprocessing
from multiprocessing import Pool
InSplList = []
for i in range(threads_num):
randomdate = sorted(np.random.choice(dateset,int(len(dateset)/5),replace = False))
InSplList.append((test_factor.loc[randomdate],i,dir_))
pool = Pool(min(multiprocessing.cpu_count(),threads_num))
return pool.map(self.threads_pool_run,InSplList)
def AutoMatic_Perf_InPool(self,neu_factors,base_factors,reverse,save_dir):
qreturn,tmp =self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors,'basef',save_dir+'temp/')
base_perf_sr= self.AutoMatic_Compare_Indicator(qreturn,reverse)
others = list(filter(lambda x: x not in base_factors,list(neu_factors)))
for sf in others:# add candidates one by one and compare performance
print(base_factors)
qreturn,tmp =self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors+[sf],sf+'_basef',save_dir+'temp/')
perf_sr = self.AutoMatic_Compare_Indicator(qreturn,reverse)
print('Beats previous best: {}; current excess Sharpe: {}, previous best excess Sharpe: {}'.format(perf_sr > base_perf_sr,perf_sr,base_perf_sr))
if perf_sr > base_perf_sr:
base_factors.append(sf)
if perf_sr > base_perf_sr:
base_perf_sr = perf_sr
qreturn,clean_factor = self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors,'basef',save_dir+'final/')
return qreturn,clean_factor
class plot_tools(object):
def __init__(self) -> None:
import matplotlib
self.plotstatus = matplotlib.get_backend()
pass
def trio_plt(self,qmean,qcum,quantiles): # plot return charts
import matplotlib.pyplot as plt
qmean[list(range(1,quantiles+1))].plot(kind= 'bar',title = 'mean')
plt.show()
qcum[list(range(1,quantiles+1))].plot(title = 'cumreturn')
plt.legend(loc = 'upper center',bbox_to_anchor=(1.1, 1.02))
plt.show()
(qcum[10]-qcum[1]).plot(title = 'long-short')
plt.show()
def fbplot(self,frontplot,bgplot,c,fname,bname):
# frontplot,bgplot:
# pd.Series
# multiindex: timestamp,code
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
tickspace = len(frontplot)//12
fig = plt.figure()
a1=fig.add_axes([0,0,1,1])
a1.bar(frontplot.index,bgplot.loc[frontplot.index],color = c)
a1.tick_params(axis='x', labelrotation= 30)
a1.xaxis.set_major_locator(ticker.MultipleLocator(tickspace))
a2 = a1.twinx()
a2.plot(frontplot.index,frontplot,color = 'red')
a2.tick_params(axis='x', labelrotation= 30)
a2.xaxis.set_major_locator(ticker.MultipleLocator(tickspace))
fig.legend(frameon = False,labels = [bname+'(left)',fname+'(right)'],loc = 'upper center')
plt.show()
def factor_plt(self,qreturn,to,ictable,fc,ndays,savedir=''):
from alphalens import utils
from pandas.plotting import table
numtable = pd.concat([qreturn.mean(),qreturn.sum(),qreturn.mean()/qreturn.std()],axis = 1).rename(columns= {0:'avg',1:'sum',2:'risk-adj'}).T
top_quantile = max(list(qreturn))
totalSeed = qreturn.index
xticks = list(range(0, len(totalSeed), 60))
xlabels = [str(totalSeed[x]) for x in xticks]
import matplotlib.pyplot as plt
plt.figure(dpi=300, figsize=(24, 12))
ax = plt.subplot(321,frame_on=False,title = fc+'_retsheet_bygroup')
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, numtable.round(5), loc='center') # replace with whichever dataframe should be rendered
ax = plt.subplot(365,frame_on=False,title = str(ndays)+'days_information')
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, ictable.round(5), loc='center') # replace with whichever dataframe should be rendered
plt.subplot(325,title = fc+'_cumret_bygroup')
plt.plot(qreturn.index,qreturn.cumsum(),label = list(qreturn))
plt.legend()
plt.xticks(rotation=90)
plt.xticks(ticks=xticks, labels=xlabels)
plt.subplot(324,title = fc+'_turnover_bygroup')
plt.bar(to.index,to,color="blue")
plt.subplot(323,title = fc+'_avgret_bygroup')
plt.bar(qreturn.mean().index,qreturn.mean(),color="y")
plt.subplot(326,title = fc+'_lsret_bygroup')
plt.plot(qreturn.index,(qreturn[top_quantile]-qreturn[1]).cumsum(),color="g")
plt.xticks(rotation=90)
plt.xticks(ticks=xticks, labels=xlabels)
try:
os.remove(savedir+fc+'.jpg')
print(fc+'.jpg'+' old file removed')
except:
print(fc+'.jpg'+' is a new file')
plt.savefig(savedir+fc+'.jpg')
if self.plotstatus != 'agg':
plt.show()
plt.close()
# heatmap display
def ShowHeatMap(self,DataFrame,savedir='',triangle = True):
import matplotlib.pyplot as plt
import seaborn as sns
f, ax = plt.subplots(figsize=(35, 15))
ax.set_title('Correlation heatmap')
# show only half of the matrix; comment out the mask if it is not needed
if triangle:
mask = np.zeros_like(DataFrame)
mask[np.triu_indices_from(mask)] = True # np.triu_indices selects the upper triangle
with sns.axes_style("white"):
sns.heatmap(DataFrame,
cmap="YlGnBu",
annot=True,
mask=mask,
)
else :
with sns.axes_style("white"):
sns.heatmap(DataFrame,
cmap="YlGnBu",
annot=True,
)
plt.savefig(savedir)
if self.plotstatus != 'agg':
plt.show()
def combine_imgs_pdf(self,folder_path, pdf_file_path,idstname):
import os
from PIL import Image
"""
Merge all images in a folder into a single pdf
Args:
folder_path (str): source folder
pdf_file_path (str): output path
"""
files = os.listdir(folder_path)
png_files = []
sources = []
for file in files:
if 'png' in file or 'jpg' in file:
png_files.append(folder_path + file)
png_files.sort()
for file in png_files:
png_file = Image.open(file)
png_file = png_file.convert("RGB")
sources.append(png_file)
sources[0].save(pdf_file_path+'{}.pdf'.format(idstname), "pdf", save_all=True, append_images=sources[1:],quality = 95)
class mate_alphalens(object):
def __init__(self) -> None:
pass
def index_mate(self,factordata,price):
fcdf = factordata.reset_index()
fcdf['date'] = pd.to_datetime(fcdf['date'])
fcdf = fcdf.rename(columns = {'symbol':'asset'}).set_index(['date','asset'])
ptemp = price.copy()
ptemp.index = pd.to_datetime(ptemp.index)
return fcdf,ptemp
def trans_ex_return(self,clean_factor,index_price,ret_col):
from alphalens import utils
index_price['factor'] = 1
base_ret = utils.compute_forward_returns(index_price[['factor']],index_price['close'].unstack())
base_ret = base_ret.droplevel('asset').reindex(clean_factor.index.get_level_values(0))
base_ret['asset'] = clean_factor.index.get_level_values('asset')
base_ret = base_ret.set_index(['asset'],append=True)
df = clean_factor.copy()
df[ret_col]= df[ret_col]-base_ret[ret_col]
return df
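# Illustrative usage sketch: `factordata` is a hypothetical (date, symbol)
# factor frame, `price` a wide close-price frame and `index_price` a
# (date, asset)-indexed benchmark frame with a 'close' column; clean_factor
# must already carry the '1D' forward-return column before trans_ex_return.
#
#   ma = mate_alphalens()
#   clean_factor, price_ts = ma.index_mate(factordata, price)
#   excess = ma.trans_ex_return(clean_factor, index_price, ret_col='1D')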
class alert(object):
def __init__(self,**file):
# use the given keyword argument as the job name, otherwise fall back to the script name
if file:
self.filename = list(file.values())[0]
else:
import sys
self.filename = sys.argv[0]
def finish_alert(self):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# 1. connect to the mail server
con = smtplib.SMTP_SSL('smtp.qq.com', 465)
# 2. log in
con.login('[email protected]', 'jwtjvrktevlobiag')
# 3. build the message
# create the mail object
msg = MIMEMultipart()
# set the subject
subject = Header('{} finished running'.format(self.filename), 'utf-8').encode()
msg['Subject'] = subject
# set the sender
msg['From'] = '[email protected]'
# set the recipient
msg['To'] = '[email protected]'
# attach the text body
text = MIMEText('{} finished running'.format(self.filename), 'plain', 'utf-8')
msg.attach(text)
# 4. send the mail
con.sendmail('[email protected]', '[email protected]', msg.as_string())
con.quit()
def breakdown_alert(self):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# 1. connect to the mail server
con = smtplib.SMTP_SSL('smtp.qq.com', 465)
# 2. log in
con.login('[email protected]', 'jwtjvrktevlobiag')
# 3. build the message
# create the mail object
msg = MIMEMultipart()
# set the subject
subject = Header('{} failed to run'.format(self.filename), 'utf-8').encode()
msg['Subject'] = subject
# set the sender
msg['From'] = '[email protected]'
# set the recipient
msg['To'] = '[email protected]'
# attach the text body
text = MIMEText('{} failed to run'.format(self.filename), 'plain', 'utf-8')
msg.attach(text)
# 4. send the mail
con.sendmail('[email protected]', '[email protected]', msg.as_string())
con.quit()
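# Illustrative usage sketch: wrap a long-running job so that a mail is sent
# on success or failure; run_daily_update is a hypothetical job function.
#
#   job_alert = alert()
#   try:
#       run_daily_update()
#       job_alert.finish_alert()
#   except Exception:
#       job_alert.breakdown_alert()
#       raise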
# sst:
# def noise_judge(self,qreturn,fc):
# from scipy import stats
# from statsmodels.stats.diagnostic import acorr_ljungbox
# # 因子判别
# lsret,groupmean,groupmean_diff,top_sharp = self.judge_material(qreturn,fc)
# '''
# 检验:
# 对两个都没通过的可能是噪声的因子做自相关性检验,因为0假设是有相关性,所以哪怕只有一点自相关性(123123)都可能不会被拒绝,所以被拒绝的基本上可认定为噪声
# '''
# t,p_lsret =stats.ttest_1samp(lsret,0,alternative='greater')
# t,p_groupmean = stats.ttest_1samp(groupmean_diff,0,alternative='greater')
# if p_groupmean>0.05 and p_lsret>0.05:
# print(fc+'可能是噪声;分组平均收益一阶差分p值{},多空收益p值{}'.format(p_groupmean,p_lsret))
# # ls_ljung = acorr_ljungbox(lsret.cumsum(), lags=[1,5,10,20])
# gmdf_ljung = acorr_ljungbox(groupmean, lags=[1,5])
# if gmdf_ljung['lb_pvalue'].min()>=0.05:
# print(fc+'是噪声;分组平均收益自相关检验最小p值{}'.format(gmdf_ljung['lb_pvalue'].min()))
# return True
# else:
# print('无法判定'+fc+'是噪声;分组平均收益自相关检验最小p值{}'.format(gmdf_ljung['lb_pvalue'].min()))
# return False
# def short_judge(self,qreturn,fc):
# from scipy import stats
# # 因子判别
# lsret,groupmean,groupmean_diff = self.judge_material(qreturn,fc)
# '''
# 检验:
# 对两个都通过的是有用的因子
# '''
# maxquantile = max(list(lsret))
# top5 = groupmean_diff.iloc[-5:]
# bottom5 = groupmean_diff.iloc[:5]
# t,p_top5 = stats.ttest_1samp(top5,0,alternative='greater')
# t,p_bottom5 = stats.ttest_1samp(bottom5,0,alternative='greater')
# if p_top5>0.5 and p_bottom5<0.1 and (abs(groupmean.iloc[-1])<abs(groupmean.iloc[0])):
# print(fc+'是空头因子;top5组平均收益一阶差分p值{},bottom5组平均收益一阶差分p值{}'.format(p_top5,p_bottom5))
# return True
# return False
# mft:
# def multif_denoisies(self,noise_factors_list,allfactors,threshold):
# '''
# 输入:
# 因子矩阵,噪声因子
# 输出:
# 去噪后的因子
# '''
# if len(noise_factors_list)==0:
# print('无可用于去噪的噪声')
# return allfactors
# other_factors_df = allfactors[list(filter(lambda x: x not in noise_factors_list,list(allfactors)))]
# noise_factors_df = self.ts_mat_orthog(allfactors[noise_factors_list])
# factordf = pd.concat([other_factors_df,noise_factors_df],axis = 1)
# # 去噪
# other_factors = list(other_factors_df)
# corrdf = self.multif_corr_ana(factordf,list(factordf))[0]
# print('相关性详情:')
# print(corrdf)
# corrdf = corrdf.loc[other_factors,noise_factors_list].abs().max(axis = 1)
# print('要被去噪的因子:')
# corr_with_noise = list(corrdf[corrdf>=threshold].index)
# print(corr_with_noise)
# for fc in corr_with_noise:
# factordf[fc] = self.orthog(factordf, fc, noise_factors_list)[0]
# return factordf[other_factors]
# def multif_cal_weight(self,factordf,factorlist,return_col,weight_type):
# # factordf: pd.DataFrame
# # multiindex: timestamp,code
# # columns: factorname1, factorname2...,returndata
# # factorlist: strlist
# # return_col: column name, str
# df = factordf.copy()
# ret_k = self.fators_abnormal_ret(df,return_col,factorlist)
# ic = df.groupby(level = 'date').apply(lambda x: x.corr(method= 'spearman')[return_col])
# del ic['ret']
# weight = ret_k*ic
# direc = ic.mean().apply(lambda x: 1 if x>0 else -1)
# if weight_type == 1:
# return weight.mean()/weight.std()*direc
# elif weight_type == 2:
# return weight.mean()*direc
# else:
# return direc
# # if weight_type == '风险平价加权':
# # cov = weight[factorlist].cov()
# # from scipy.optimize import minimize
# # def objective(x):
# # w_cov = np.dot(cov,x.T)
# # for n in range(len(x)):
# # w_cov[n] *= x[n]
# # mat = np.array([w_cov]*len(x))
# # scale = 1/sum(abs(mat))
# # return np.sum(abs(scale*(mat-mat.T)))
# # initial_w=np.array([0.2]*len(factorlist))
# # cons = []
# # cons.append({'type':'eq','fun':lambda x: sum(x)-1})
# # for i in range(len(initial_w)):
# # cons.append({'type':'ineq','fun':lambda x: x[i]})
# # #结果
# # res=minimize(objective,initial_w,method='SLSQP',constraints=cons)
# # params = pd.Series(res.x)
# # params.index = cov.index
# # return params
# def weighted_factor(self,factordf,weight):
# # factordf: pd.DataFrame
# # multiindex: timestamp,code
# # columns: factorname1, factorname2...
# # weight:pd.Series
# wf = (weight*factordf).sum(axis = 1)
# return pd.DataFrame(wf,columns = ['weighted_factor'])
Alpha-Planet | /Alpha_Planet-0.0.2-py3-none-any.whl/Alpha_Planet/Alpha_Rabbit.py | Alpha_Rabbit.py
from Alpha_Rabbit.Factor_Def_and_Get_Method import *
_method = Factor_get_method()
def Factor_Calculator(pricebyday,minbar,conn,todaydate,notst,factors_to_cal):
######################################## factors computed from daily bars ####################################
uploadfactordict = {}
Close = pricebyday['close'].unstack().sort_index()
Open = pricebyday['open'].unstack().sort_index()
High = pricebyday['high'].unstack().sort_index()
Low = pricebyday['low'].unstack().sort_index()
volume = pricebyday[['volume']].pivot_table(index = 'date',columns = 'symbol',values = 'volume').sort_index()
total_turnover = pricebyday[['total_turnover']].pivot_table(index = 'date',columns = 'symbol',values = 'total_turnover').sort_index()
tovr_r = pricebyday['turnover_ratio'].unstack().sort_index()
Close_ret = Close.pct_change()
tempClose = Close.iloc[-30:]
tempOpen = Open.iloc[-30:]
tempHigh = High.iloc[-30:]
tempLow = Low.iloc[-30:]
anaual_close = Close.iloc[-272:]
anaual_High = High.iloc[-272:]
if 'mmt_intraday_M' in factors_to_cal or factors_to_cal == 'all':
# 1-month intraday momentum
uploadfactordict['mmt_intraday_M'] = mmt_intraday_M(tempClose,tempOpen)
if 'mmt_range_M' in factors_to_cal or factors_to_cal == 'all':
# 1-month range-adjusted momentum
uploadfactordict['mmt_range_M'] = mmt_range_M(tempHigh,tempLow,tempClose)
if 'mmt_overnight_M' in factors_to_cal or factors_to_cal == 'all':
# overnight momentum
uploadfactordict['mmt_overnight_M'] = mmt_overnight_M(tempOpen,tempClose)
if 'mmt_route_M' in factors_to_cal or factors_to_cal == 'all':
# path-adjusted momentum
uploadfactordict['mmt_route_M'] = mmt_route_M(tempClose)
if 'mmt_discrete_M' in factors_to_cal or factors_to_cal == 'all':
# information-discreteness momentum
uploadfactordict['mmt_discrete_M'] = mmt_discrete_M(tempClose)
if 'mmt_sec_rank_M' in factors_to_cal or factors_to_cal == 'all':
# cross-sectional rank momentum
uploadfactordict['mmt_sec_rank_M'] = mmt_sec_rank_M(tempClose)
if 'mmt_time_rank_M' in factors_to_cal or factors_to_cal == 'all':
# time-series rank score
uploadfactordict['mmt_time_rank_M'] = mmt_time_rank_M(anaual_close)
if 'mmt_highest_days_A' in factors_to_cal or factors_to_cal == 'all':
# days since the highest price of the past year
uploadfactordict['mmt_highest_days_A'] = mmt_highest_days_A(anaual_High)
if 'volumestable' in factors_to_cal or factors_to_cal == 'all':
# volume stability
uploadfactordict['volumestable'] = volumestable(volume)
if '_con' in factors_to_cal or factors_to_cal == 'all':
# return-consistency factor
uploadfactordict['_con'] = re_con(tempClose)
if 'bofu_money' in factors_to_cal or factors_to_cal == 'all':
# price range / turnover value
uploadfactordict['bofu_money'] = bofu_money(tempHigh,tempLow,tempOpen,total_turnover)
if 'vol_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month return volatility
uploadfactordict['vol_std_1M'] = vol_std(Close_ret,'1M',30)
if 'vol_up_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upside return volatility
uploadfactordict['vol_up_std_1M'] = vol_up_std(Close_ret,'1M',30)
if 'vol_down_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month downside return volatility
uploadfactordict['vol_down_std_1M'] = vol_down_std(Close_ret,'1M',30)
if 'vol_updown_ratio_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month ratio of upside to downside volatility
uploadfactordict['vol_updown_ratio_1M'] = vol_updown_ratio(Close_ret,'1M',30)
if 'vol_highlow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average high-low range
uploadfactordict['vol_highlow_avg_1M'] = vol_highlow_avg(High,Low,'1M',30)
if 'vol_highlow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month high-low range volatility
uploadfactordict['vol_highlow_std_1M'] = vol_highlow_std(High,Low,'1M',30)
if 'vol_highlow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month high-low range stability
uploadfactordict['vol_highlow_stable_1M'] = vol_highlow_stable(High,Low,'1M',30)
if 'vol_upshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average upper shadow
uploadfactordict['vol_upshadow_avg_1M'] = vol_upshadow_avg(High,Open,Close,'1M',30)
if 'vol_upshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upper-shadow volatility
uploadfactordict['vol_upshadow_std_1M'] = vol_upshadow_std(High,Open,Close,'1M',30)
if 'vol_upshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upper-shadow stability
uploadfactordict['vol_upshadow_stable_1M'] = vol_upshadow_stable(High,Open,Close,'1M',30)
if 'vol_downshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average lower shadow
uploadfactordict['vol_downshadow_avg_1M'] = vol_downshadow_avg(Low,Open,Close,'1M',30)
if 'vol_downshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month lower-shadow volatility
uploadfactordict['vol_downshadow_std_1M'] = vol_downshadow_std(Low,Open,Close,'1M',30)
if 'vol_downshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month lower-shadow stability
uploadfactordict['vol_downshadow_stable_1M'] = vol_downshadow_stable(Low,Open,Close,'1M',30)
if 'vol_w_upshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average Williams upper shadow
uploadfactordict['vol_w_upshadow_avg_1M'] = vol_w_upshadow_avg(High,Close,'1M',30)
if 'vol_w_upshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams upper-shadow volatility
uploadfactordict['vol_w_upshadow_std_1M'] = vol_w_upshadow_std(High,Close,'1M',30)
if 'vol_w_upshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams upper-shadow stability
uploadfactordict['vol_w_upshadow_stable_1M'] = vol_w_upshadow_stable(High,Close,'1M',30)
if 'vol_w_downshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average Williams lower shadow
uploadfactordict['vol_w_downshadow_avg_1M'] = vol_w_downshadow_avg(Low,Close,'1M',30)
if 'vol_w_downshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams lower-shadow volatility
uploadfactordict['vol_w_downshadow_std_1M'] = vol_w_downshadow_std(Low,Close,'1M',30)
if 'vol_w_downshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams lower-shadow stability
uploadfactordict['vol_w_downshadow_stable_1M'] = vol_w_downshadow_stable(Low,Close,'1M',30)
if 'liq_turn_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average turnover ratio
uploadfactordict['liq_turn_avg_1M'] = liq_turn_avg(tovr_r,'1M',30)
if 'liq_turn_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month turnover-ratio volatility
uploadfactordict['liq_turn_std_1M'] = liq_turn_std(tovr_r,'1M',30)
if 'liq_vstd_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month turnover to return-volatility ratio
uploadfactordict['liq_vstd_1M'] = liq_vstd(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average Amihud illiquidity
uploadfactordict['liq_amihud_avg_1M'] = liq_amihud_avg(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Amihud illiquidity volatility
uploadfactordict['liq_amihud_std_1M'] = liq_amihud_std(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Amihud illiquidity stability
uploadfactordict['liq_amihud_stable_1M'] = liq_amihud_stable(tovr_r,Close_ret,'1M',30)
if 'liq_shortcut_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average shortest-path illiquidity
uploadfactordict['liq_shortcut_avg_1M'] = liq_shortcut_avg(tovr_r,High,Low,Open,Close,'1M',30)
if 'liq_shortcut_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month shortest-path illiquidity volatility
uploadfactordict['liq_shortcut_std_1M'] = liq_shortcut_std(tovr_r,High,Low,Open,Close,'1M',30)
if 'liq_shortcut_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month shortest-path illiquidity stability
uploadfactordict['liq_shortcut_stable_1M'] = liq_shortcut_stable(tovr_r,High,Low,Open,Close,'1M',30)
if 'PLUS' in factors_to_cal or factors_to_cal == 'all':
# difference between upper and lower shadows
uploadfactordict['PLUS'] = PLUS(tempClose,tempHigh,tempLow)
if 'liq_std_w_plus_1M' in factors_to_cal or factors_to_cal == 'all':
# shadow difference weighted by turnover volatility
uploadfactordict['liq_std_w_plus_1M'] = liq_std_w_plus(tempClose,tempHigh,tempLow, tovr_r,'1M',30)
if 'HL_Sprd' in factors_to_cal or factors_to_cal == 'all':
# ideal-range factor
uploadfactordict['HL_Sprd'] = HL_Sprd(Close,High,Low,20)
if 'tvr_std_1M' in factors_to_cal or factors_to_cal == 'all':
# turnover-ratio stability
uploadfactordict['tvr_std_1M'] = tvr_std(tovr_r,'1M',20)
if 'corr_price_turn_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between price and turnover
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-20:]
uploadfactordict['corr_price_turn_1M'] = corr_price_turn(timerange,pricebyday,'1M')
if 'corr_ret_turn_post_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between return and turnover
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-21:] # one extra day is needed to compute pct_change
uploadfactordict['corr_ret_turn_post_1M'] = corr_ret_turn_post(timerange,pricebyday,'1M')
if 'corr_ret_turnd_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between return and turnover change
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-21:]
uploadfactordict['corr_ret_turnd_1M'] = corr_ret_turnd(timerange,pricebyday,'1M')
######################################## factors computed from intraday (minute) bars ####################################
# # factors based on per-trade amount
sing_trade_amt = pd.DataFrame(minbar['total_turnover']/minbar['num_trades'],columns= ['single_trade_amt'])
sing_trade_amt = sing_trade_amt[sing_trade_amt['single_trade_amt']>0]
sing_trade_amt['trading_date'] = todaydate
sta_del_extrm = sing_trade_amt.groupby(level = 0).apply(lambda x: x.sort_values('single_trade_amt').iloc[:-10]).droplevel(0)# drop the largest values
sta_50pct = sing_trade_amt.groupby(level = 0).\
apply(lambda x: x[x['single_trade_amt']<x['single_trade_amt'].quantile(0.5)]).droplevel(0)# keep the bottom 50%
if 'mts' in factors_to_cal or factors_to_cal == 'all':
# main-force trading intensity
uploadfactordict['mts'] = mts(sta_del_extrm,minbar,todaydate)
if 'mte' in factors_to_cal or factors_to_cal == 'all':
# main-force trading sentiment
uploadfactordict['mte'] = mte(sta_del_extrm,minbar,todaydate)
if 'qua' in factors_to_cal or factors_to_cal == 'all':
# quantile factor qua
uploadfactordict['qua'] = qua(sta_del_extrm,todaydate)
if 'qua20m' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('qua',notst.index[-20],conn)
q = qua(sta_del_extrm,todaydate)
qua20m = pd.concat([prv_factor,q]).unstack().rolling(20,min_periods=1).mean().iloc[-1:].stack().rename(columns = {'qua':'qua20m'})
uploadfactordict['qua20m'] = qua20m
if 'skew' in factors_to_cal or factors_to_cal == 'all':
# skewness factor skew
uploadfactordict['skew'] = skew(sta_50pct,todaydate)
if 'skew20m' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('skew',notst.index[-20],conn)
sk = skew(sta_50pct,todaydate)
skew20m = pd.concat([prv_factor,sk]).unstack().rolling(20,min_periods=1).mean().iloc[-1:].stack().rename(columns = {'skew':'skew20m'})
uploadfactordict['skew20m'] = skew20m
if 's_reverse' in factors_to_cal or factors_to_cal == 'all':
# strong reversal factor
uploadfactordict['s_reverse'] = s_reverse(sing_trade_amt,minbar,todaydate)
if 's_reverse_10_sum' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('s_reverse',notst.index[-10],conn)
sr = s_reverse(sing_trade_amt,minbar,todaydate)
s_reverse_10_sum = pd.concat([prv_factor,sr]).unstack().rolling(10,min_periods=1).sum().iloc[-1:].stack().rename(columns = {'s_reverse':'s_reverse_10_sum'})
uploadfactordict['s_reverse_10_sum'] = s_reverse_10_sum
if 'daily_sta_90pct' in factors_to_cal or factors_to_cal == 'all':
# daily 90th percentile of per-trade amount (building block of the ideal reversal factor)
uploadfactordict['daily_sta_90pct'] = daily_sta_90pct(sta_del_extrm)
if 'ideal_reverse' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('daily_sta_90pct',notst.index[-20],conn)
dsta90 = daily_sta_90pct(sing_trade_amt)
daily_sta_cal = pd.concat([prv_factor,dsta90])
uploadfactordict['ideal_reverse'] = ideal_reverse(daily_sta_cal,Close)
return uploadfactordict
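# Illustrative call sketch (assumptions: `pricebyday` is a (date, symbol)
# indexed daily-bar frame, `minbar` a (symbol, datetime) indexed minute-bar
# frame, `conn` a sqlite3 connection holding previously stored factors and
# `notst` a frame whose index is the trading calendar used for look-backs):
#
#   factors = Factor_Calculator(pricebyday, minbar, conn, '2023-06-30',
#                               notst, ['mmt_intraday_M', 'qua'])
#   for name, frame in factors.items():
#       frame.to_sql(name, conn, if_exists='append')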
Alpha-Planet | /Alpha_Planet-0.0.2-py3-none-any.whl/Alpha_Planet/Factor_Calculator.py | Factor_Calculator.py
import pandas as pd
import numpy as np
class Factor_get_method(object):
def __init__(self) -> None:
pass
def get_all_tables(self,con):
sql = "select name from sqlite_master where type ='table' order by name"
c = con.cursor()
result = c.execute(sql)
factorfilelist = [i[0] for i in result.fetchall()]
return factorfilelist
def sql_fetch(self,con,tablename):
cursorObj = con.cursor()
cursorObj.execute('PRAGMA table_info("{}")'.format(tablename))
return cursorObj.fetchall()
def sql_exec(self,sql,sqlcols,conn):
cur = conn.cursor()
result = cur.execute(sql)
result = pd.DataFrame(result,columns = sqlcols).set_index(['date','symbol'])
return result
def get_prev_days_factor_by_name(self,factorname:str,date:str,conn):
sql = "select * from {} where {}.date >= '{}'".format(factorname,factorname,date)
sqlcols = [txt[1] for txt in self.sql_fetch(conn,factorname)]
return self.sql_exec(sql,sqlcols,conn)
def get_selected_date_factor_by_name(self,factorname:str,date:str,conn):
sql = "select * from {} where {}.date = '{}'".format(factorname,factorname,date)
sqlcols = [txt[1] for txt in self.sql_fetch(conn,factorname)]
return self.sql_exec(sql,sqlcols,conn)
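# Illustrative usage sketch: read a stored factor back from the sqlite store;
# 'factors.db' and the date are hypothetical.
#
#   import sqlite3
#   conn = sqlite3.connect('factors.db')
#   m = Factor_get_method()
#   print(m.get_all_tables(conn))
#   qua_hist = m.get_prev_days_factor_by_name('qua', '2023-06-01', conn)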
def mmt_intraday_M(tempClose,tempOpen):
# 1-month intraday momentum
mmt_intraday_M = (tempClose/tempOpen - 1).iloc[-22:].cumsum()
mmt_intraday_M = pd.DataFrame(mmt_intraday_M.iloc[-1:].stack(),columns = ['mmt_intraday_M'])
return mmt_intraday_M
# 1-month range-adjusted momentum
def mmt_range_M(tempHigh,tempLow,tempClose):
High_m = tempHigh.iloc[-22:].max()
Low_m = tempLow.iloc[-22:].min()
mmt_range_M = (High_m-Low_m)/tempClose.shift(22)
mmt_range_M = pd.DataFrame(mmt_range_M.iloc[-1:].stack(),columns = ['mmt_range_M'])
return mmt_range_M
def mmt_overnight_M(tempOpen,tempClose):
# overnight momentum
mmt_overnight = tempOpen/tempClose.shift(1) - 1
todaydate = mmt_overnight.index[-1]
mmt_overnight_M = pd.DataFrame(mmt_overnight.iloc[-20:].sum(),columns = ['mmt_overnight_M'])
mmt_overnight_M['date'] = todaydate
mmt_overnight_M = mmt_overnight_M.reset_index().set_index(['date','symbol'])
return mmt_overnight_M
def mmt_route_M(tempClose):
# path-adjusted momentum
mmt_route_M = (tempClose/tempClose.shift(20) - 1)/abs(tempClose/tempClose.shift(1)-1).rolling(20).sum()
mmt_route_M = pd.DataFrame(mmt_route_M.iloc[-1:].stack(),columns = ['mmt_route_M'])
return mmt_route_M
def mmt_discrete_M(tempClose):
# information-discreteness momentum
daily_up = (tempClose/tempClose.shift(1)-1).applymap(lambda x: int(x>0) if not np.isnan(x) else np.nan)
daily_down = (tempClose/tempClose.shift(1)-1).applymap(lambda x: int(x<0) if not np.isnan(x) else np.nan)
mmt_discrete_M = daily_up.rolling(20).sum()/20-daily_down.rolling(20).sum()/20
mmt_discrete_M = pd.DataFrame(mmt_discrete_M.iloc[-1:].stack(),columns = ['mmt_discrete_M'])
return mmt_discrete_M
def mmt_sec_rank_M(tempClose):
# cross-sectional rank momentum
mmt_sec_rank_M = (tempClose/tempClose.shift(1)-1).rank(axis = 1).rolling(20).mean()
mmt_sec_rank_M = pd.DataFrame(mmt_sec_rank_M.iloc[-1:].stack(),columns = ['mmt_sec_rank_M'])
return mmt_sec_rank_M
def mmt_time_rank_M(anaual_close):
# time-series rank score
# anaual_close = Close.iloc[-272:]
mmt_time_rank_M = (anaual_close/anaual_close.shift(1)-1).rolling(252,min_periods = 100).rank().rolling(20).mean()
mmt_time_rank_M = pd.DataFrame(mmt_time_rank_M.iloc[-1:].stack(),columns = ['mmt_time_rank_M'])
return mmt_time_rank_M
def mmt_highest_days_A(anaual_High):
# days since the highest price of the past year
todaydate = anaual_High.index[-1]
mmt_highest_days_A = 252- anaual_High.iloc[-252:].apply(lambda x: x.argmax())
mmt_highest_days_A = pd.DataFrame(mmt_highest_days_A,columns= ['mmt_highest_days_A'])
mmt_highest_days_A['date'] = todaydate
mmt_highest_days_A = mmt_highest_days_A.reset_index().set_index(['date','symbol'])
return mmt_highest_days_A
def volumestable(volume):
# volume stability
vol_m = volume.rolling(20).mean()
vol_std = volume.rolling(20).std()
volumestable = (vol_m/vol_std)
volumestable = pd.DataFrame(volumestable.iloc[-1:].stack(),columns = ['volumestable'])
return volumestable
def re_con(tempClose):
# return-consistency factor
import numpy as np
d5_r = tempClose.pct_change(5).iloc[-1:]/5
d10_r = tempClose.pct_change(10).iloc[-1:]/10/np.sqrt(2)
d15_r = tempClose.pct_change(15).iloc[-1:]/15/np.sqrt(3)
con = pd.concat([d5_r.stack(),d10_r.stack(),d15_r.stack()],axis = 1).dropna()
con = con.mean(axis =1)/con.std(axis = 1)
con = con.unstack()
con_output = con.rank(axis = 1)
con_output = con_output.apply(lambda x: x-x.mean(),axis = 1).abs()
_con = pd.DataFrame(con_output.iloc[-1:].stack(),columns = ['_con'])
return _con
def bofu_money(tempHigh,tempLow,tempOpen,total_turnover):
# price range / turnover value
bofu_money = (tempHigh-tempLow)/tempOpen/total_turnover
bofu_money = pd.DataFrame(bofu_money.iloc[-1:].stack(),columns = ['bofu_money'])
return bofu_money
def vol_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = pd.DataFrame(ret.std(),columns = ['vol_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_up_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = (ret*(ret>0).astype(int)).replace(0,np.nan)
df = pd.DataFrame(df.std(),columns = ['vol_up_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_down_std(df,periodname,perioddays):
ret = df.iloc[-perioddays:]
todaydate = ret.index[-1]
df = (ret*(ret<0).astype(int)).replace(0,np.nan)
df = pd.DataFrame(df.std(),columns = ['vol_down_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_highlow_avg(high,low,periodname,perioddays):
ratio = (high/low).iloc[-perioddays:]
todaydate = ratio.index[-1]
df = pd.DataFrame(ratio.mean(),columns = ['vol_highlow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_highlow_std(high,low,periodname,perioddays):
ratio = (high/low).iloc[-perioddays:]
todaydate = ratio.index[-1]
df = pd.DataFrame(ratio.std(),columns = ['vol_highlow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_updown_ratio(df,periodname,perioddays):
upstd = vol_up_std(df,periodname,perioddays)
downstd = vol_down_std(df,periodname,perioddays)
updownratio = pd.DataFrame(upstd['vol_up_std_'+periodname]/downstd['vol_down_std_'+periodname],columns = ['vol_updown_ratio_'+periodname])
return updownratio
def vol_highlow_stable(high,low,periodname,perioddays):
hlavg = vol_highlow_avg(high,low,periodname,perioddays)
hlstd = vol_highlow_std(high,low,periodname,perioddays)
hlstable = pd.DataFrame(hlavg['vol_highlow_avg_'+periodname]/hlstd['vol_highlow_std_'+periodname],columns = ['vol_highlow_stable_'+periodname])
return hlstable
def vol_upshadow_avg(High,Open,Close,periodname,perioddays):
multiper = (Open>Close).astype(int)
Open_Close_max = multiper*Open + (1-multiper)*Close
upshadow_df = ((High - Open_Close_max)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.mean(),columns = ['vol_upshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_upshadow_std(High,Open,Close,periodname,perioddays):
multiper = (Open>Close).astype(int)
Open_Close_max = multiper*Open + (1-multiper)*Close
upshadow_df = ((High - Open_Close_max)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.std(),columns = ['vol_upshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_upshadow_stable(High,Open,Close,periodname,perioddays):
avg = vol_upshadow_avg(High,Open,Close,periodname,perioddays)
std = vol_upshadow_std(High,Open,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_upshadow_avg_'+periodname]/std['vol_upshadow_std_'+periodname],columns = ['vol_upshadow_stable_'+periodname])
return df
def vol_downshadow_avg(Low,Open,Close,periodname,perioddays):
multiper = (Open<Close).astype(int)
Open_Close_min = multiper*Open + (1-multiper)*Close
downshadow_df = ((Open_Close_min - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.mean(),columns = ['vol_downshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_downshadow_std(Low,Open,Close,periodname,perioddays):
multiper = (Open<Close).astype(int)
Open_Close_min = multiper*Open + (1-multiper)*Close
downshadow_df = ((Open_Close_min - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.std(),columns = ['vol_downshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_downshadow_stable(Low,Open,Close,periodname,perioddays):
avg = vol_downshadow_avg(Low,Open,Close,periodname,perioddays)
std = vol_downshadow_std(Low,Open,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_downshadow_avg_'+periodname]/std['vol_downshadow_std_'+periodname],columns = ['vol_downshadow_stable_'+periodname])
return df
def vol_w_downshadow_avg(Low,Close,periodname,perioddays):
downshadow_df = ((Close - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.mean(),columns = ['vol_w_downshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_downshadow_std(Low,Close,periodname,perioddays):
downshadow_df = ((Close - Low)/Low).iloc[-perioddays:]
todaydate = downshadow_df.index[-1]
df = pd.DataFrame(downshadow_df.std(),columns = ['vol_w_downshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_downshadow_stable(Low,Close,periodname,perioddays):
avg = vol_w_downshadow_avg(Low,Close,periodname,perioddays)
std = vol_w_downshadow_std(Low,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_w_downshadow_avg_'+periodname]/std['vol_w_downshadow_std_'+periodname],columns = ['vol_w_downshadow_stable_'+periodname])
return df
def vol_w_upshadow_avg(High,Close,periodname,perioddays):
upshadow_df = ((High - Close)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.mean(),columns = ['vol_w_upshadow_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_upshadow_std(High,Close,periodname,perioddays):
upshadow_df = ((High - Close)/High).iloc[-perioddays:]
todaydate = upshadow_df.index[-1]
df = pd.DataFrame(upshadow_df.std(),columns = ['vol_w_upshadow_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def vol_w_upshadow_stable(High,Close,periodname,perioddays):
avg = vol_w_upshadow_avg(High,Close,periodname,perioddays)
std = vol_w_upshadow_std(High,Close,periodname,perioddays)
df = pd.DataFrame(avg['vol_w_upshadow_avg_'+periodname]/std['vol_w_upshadow_std_'+periodname],columns = ['vol_w_upshadow_stable_'+periodname])
return df
def liq_turn_avg(tovr_r,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r_df.index[-1]
df = pd.DataFrame(tovr_r_df.mean(),columns = ['liq_turn_avg_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_turn_std(tovr_r,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r_df.index[-1]
df = pd.DataFrame(tovr_r_df.std(),columns = ['liq_turn_std_'+periodname])
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_vstd(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df = ret.iloc[-perioddays:]
df = pd.DataFrame(tovr_r_df.mean()/ret_df.std(),columns = ['liq_vstd_'+periodname])
todaydate = tovr_r_df.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_avg(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df_abs = ret.iloc[-perioddays:].abs()
amihud = ret_df_abs/tovr_r_df
df = pd.DataFrame(amihud.mean(),columns = ['liq_amihud_avg_'+periodname])
todaydate = amihud.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_std(tovr_r,ret,periodname,perioddays):
tovr_r_df = tovr_r.iloc[-perioddays:]
ret_df_abs = ret.iloc[-perioddays:].abs()
amihud = ret_df_abs/tovr_r_df
df = pd.DataFrame(amihud.std(),columns = ['liq_amihud_std_'+periodname])
todaydate = amihud.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_amihud_stable(tovr_r,ret,periodname,perioddays):
avg = liq_amihud_avg(tovr_r,ret,periodname,perioddays)
std = liq_amihud_std(tovr_r,ret,periodname,perioddays)
v = avg['liq_amihud_avg_'+periodname]/std['liq_amihud_std_'+periodname]
df = pd.DataFrame(v,columns = ['liq_amihud_stable_'+periodname])
return df
def liq_shortcut_avg(tovr_r,High,Low,Open,Close,periodname,perioddays):
shortcut = 2*(High - Low) - (Open - Close).abs()
v = shortcut.iloc[-perioddays:]/tovr_r.iloc[-perioddays:]
df = pd.DataFrame(v.mean(),columns = ['liq_shortcut_avg_'+periodname])
todaydate = v.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_shortcut_std(tovr_r,High,Low,Open,Close,periodname,perioddays):
shortcut = 2*(High - Low) - (Open - Close).abs()
v = shortcut.iloc[-perioddays:]/tovr_r.iloc[-perioddays:]
df = pd.DataFrame(v.std(),columns = ['liq_shortcut_std_'+periodname])
todaydate = v.index[-1]
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_shortcut_stable(tovr_r,High,Low,Open,Close,periodname,perioddays):
avg = liq_shortcut_avg(tovr_r,High,Low,Open,Close,periodname,perioddays)
std = liq_shortcut_std(tovr_r,High,Low,Open,Close,periodname,perioddays)
v = avg['liq_shortcut_avg_'+periodname]/std['liq_shortcut_std_'+periodname]
df = pd.DataFrame(v,columns = ['liq_shortcut_stable_'+periodname])
return df
def PLUS(Close, High, Low):
plus = (2*Close - High - Low)/Close.shift(1)
todaydate = plus.index[-1]
df = pd.DataFrame(plus.iloc[-1])
df.columns = ['PLUS']
df['date'] = todaydate
df = df.reset_index().set_index(['date','symbol'])
return df
def liq_std_w_plus(Close, High, Low, tovr_r,periodname,perioddays):
plus = PLUS(Close, High, Low)
liq_std = liq_turn_std(tovr_r,periodname,perioddays)
plus['PLUS'] = plus['PLUS'].groupby(level = 'date', group_keys=False).apply(lambda x: x-min(0,x.min()))
swp = liq_std['liq_turn_std_'+periodname]*plus['PLUS']
df = pd.DataFrame(swp,columns = ['liq_std_w_plus_'+periodname])
return df
def tvr_std(tovr_r,periodname,perioddays):
df = tovr_r.iloc[-perioddays:]
todaydate = tovr_r.index[-1]
fc = pd.DataFrame(df.std())
fc.columns = ['tvr_std_'+periodname]
fc['date'] = todaydate
fc = fc.reset_index().set_index(['date','symbol'])
return fc.sort_index()
def HL_Sprd(close,high,low,perioddays):
todaydate = close.index[-1]
sprd = (high/low - 1).iloc[-perioddays:]
close_ = close.iloc[-perioddays:]
phigh = close_.apply(lambda x: x>x.quantile(0.75)).astype(int).replace(0,np.nan)
plow = close_.apply(lambda x: x<x.quantile(0.25)).astype(int).replace(0,np.nan)
vhigh = pd.DataFrame((sprd*phigh).mean())
vlow = pd.DataFrame((sprd*plow).mean())
vlow['date'],vhigh['date'] = todaydate,todaydate
vhigh = vhigh.set_index('date',append=True).swaplevel()
vlow = vlow.set_index('date',append=True).swaplevel()
hlsprd = vhigh-vlow
hlsprd.columns = ['HL_Sprd']
return hlsprd.dropna()
def corr_price_turn(timerange,pricebyday,periodname):
price = pricebyday.loc[timerange]
fc = price[['close','turnover_ratio']].groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['close']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_price_turn_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def corr_ret_turn_post(timerange,pricebyday,periodname):
pricedf = pricebyday.loc[timerange]
pricedf['turnover_ratio'] = pricedf['turnover_ratio'].unstack().sort_index().shift(1).stack() # post
pricedf['ret'] = pricedf['close'].unstack().sort_index().pct_change().stack()
fc = pricedf[['ret','turnover_ratio']].groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['ret']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_ret_turn_post_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def corr_ret_turnd(timerange,pricebyday,periodname):
pricedf = pricebyday.loc[timerange]
pricedf['turnover_ratio_pct'] = pricedf['turnover_ratio'].unstack().sort_index().pct_change().stack()
pricedf['ret'] = pricedf['close'].unstack().sort_index().pct_change().stack()
fc = pricedf[['ret','turnover_ratio_pct']].dropna().groupby(level = 'symbol').apply(lambda x: x.droplevel('symbol').sort_index().corr())
fc = list(fc.groupby(level = 1))[1][1].droplevel(1)['ret']
fc = pd.DataFrame(fc).dropna()
fc.columns =['corr_ret_turnd_'+periodname]
fc['date'] = timerange[-1]
fc = fc.reset_index().set_index(['date','symbol'])
return fc
def mts(sta_del_extrm,minbar,todaydate):
mts = sta_del_extrm[['single_trade_amt']]
mts['total_turnover'] = minbar['total_turnover']
mts = mts.groupby(level = 'symbol').corr()[::2]['total_turnover'].droplevel(1)
mts= pd.DataFrame(mts)
mts.columns = ['mts']
mts['date'] = todaydate
mts = mts.reset_index().set_index(['date','symbol'])
return mts
def mte(sta_del_extrm,minbar,todaydate):
mte = sta_del_extrm[['single_trade_amt']]
mte['close'] = minbar['close']
mte = mte.groupby(level = 'symbol').corr()[::2]['close'].droplevel(1)
mte= pd.DataFrame(mte)
mte.columns = ['mte']
mte['date'] = todaydate
mte = mte.reset_index().set_index(['date','symbol'])
return mte
def qua(sta_del_extrm,todaydate):
qua = sta_del_extrm.groupby(level = 'symbol').\
apply(lambda x: (x['single_trade_amt'].quantile(0.1)-\
x['single_trade_amt'].min())/(x['single_trade_amt'].max()-x['single_trade_amt'].min()))
qua = pd.DataFrame(qua,columns = ['qua'])
qua['date'] = todaydate
qua = qua.reset_index().set_index(['date','symbol'])
qua.index.names = ['date','symbol']
return qua
def skew(sta_50pct,todaydate):# skewness factor skew
skew = sta_50pct.groupby(level = 'symbol').\
apply(lambda x: (((x['single_trade_amt']-x['single_trade_amt'].mean())/x['single_trade_amt'].std())**3).mean())
skew = pd.DataFrame(skew,columns = ['skew'])
skew['date'] = todaydate
skew = skew.reset_index().set_index(['date','symbol'])
skew.index.names = ['date','symbol']
return skew
def s_reverse(sing_trade_amt,minbar,todaydate):# strong reversal factor
minute_r = sing_trade_amt.copy()
minute_r['minute_r'] = minbar['close']/minbar['open'] - 1
minute_r = minute_r.set_index('trading_date',append = True)
s_reverse = minute_r.groupby(level = 0).\
apply(lambda x: x[x.single_trade_amt > x.single_trade_amt.quantile(0.8)].minute_r.sum())
s_reverse = pd.DataFrame(s_reverse,columns = ['s_reverse'])
s_reverse['date'] = todaydate
s_reverse = s_reverse.reset_index().set_index(['date','symbol'])
s_reverse.index.names = ['date','symbol']
return s_reverse
def daily_sta_90pct(sta_del_extrm):# daily 90th percentile of per-trade amount
daily_sta = sta_del_extrm.set_index('trading_date',append = True).rename_axis(index = {'trading_date':'date'})
daily_sta_90pct = daily_sta.droplevel('datetime').groupby(level = 'symbol').apply(lambda x: x.groupby(level = 1).quantile(0.9)).reset_index().set_index(['date','symbol'])
daily_sta_90pct.columns = ['daily_sta_90pct']
return daily_sta_90pct
def ideal_reverse(daily_sta_cal,Close):
daily_sta_cal['day_return'] = Close.pct_change().stack()
by_stock = list(daily_sta_cal.groupby(level = 1))
def apply_rolling_cal(rollingdata):
if len(rollingdata.index)<20:
return
else:
temp = rollingdata.sort_values('daily_sta_90pct')
returndf = rollingdata.iloc[-1:].copy()
returndf['ideal_reverse'] = temp.iloc[:10].day_return.sum() - temp.iloc[10:].day_return.sum()
return returndf['ideal_reverse']
ideal_reverse = list(map(lambda x:apply_rolling_cal(x[1]),by_stock))
ideal_reverse = pd.concat(ideal_reverse)
ideal_reverse = pd.DataFrame(ideal_reverse)
ideal_reverse.columns = ['ideal_reverse']
return ideal_reverse
Alpha-Rabbit | /Alpha_Rabbit-1.4.17.tar.gz/Alpha_Rabbit-1.4.17/Alpha_Rabbit/Factor_Def_and_Get_Method.py | Factor_Def_and_Get_Method.py
import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
from tqdm import *
class single_signal_test(object):
def __init__(self) -> None:
pass
def cal_turnover(self,df,ndays):
# holdings:
# pd.Series
# multiindex: timestamp,code
# all values are 1
holdings = df.copy()
holdings = holdings.unstack().dropna(how ='all',axis = 1)
holdings = holdings.apply(lambda x: x/x.sum(),axis = 1)
holdings = holdings.fillna(0)
return (holdings.diff(ndays).abs().sum(axis = 1)/2)
def cal_holdingnums(self,df):
# holdings:
# pd.Series
# multiindex: timestamp,code
# all values are 1
holdings = df.copy()
holdings = holdings.groupby(level = 0).sum()
return holdings
def one_factor_grouper(self,df,factorname,quantiles,qcut): # quantile grouping
# concatdf:pd.DataFrame
# factorname: str
# multiindex: timestamp,code
# columns: nday_return, factorname1, factorname2...
concatdf = df[[factorname]].copy().round(6)# round to 6 decimals so that ranking is stable
concatdf[factorname+'_rank'] = concatdf[factorname].groupby(level = 'date', group_keys = False).rank()
if qcut:
concatdf[factorname+'_quantile'] =concatdf[factorname+'_rank'].dropna().groupby(level = 'date', group_keys = False).apply(lambda x: pd.qcut(x,quantiles,labels=list(range(1,quantiles+1)))).astype(int)
else:
concatdf[factorname+'_quantile'] =concatdf[factorname+'_rank'].dropna().groupby(level = 'date', group_keys = False).apply(lambda x: pd.cut(x,quantiles,labels=list(range(1,quantiles+1)))).astype(int)
return concatdf
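# Illustrative sketch of the grouping step: for a (date, symbol) indexed
# frame `df` with a hypothetical column 'mom', the call below adds
# 'mom_rank' and 'mom_quantile' (1..5), both computed cross-sectionally per
# date; qcut=True gives equal-count bins, qcut=False equal-width bins on the rank.
#
#   sst = single_signal_test()
#   grouped = sst.one_factor_grouper(df, 'mom', quantiles=5, qcut=True)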
def one_factor_return(self,df,factorname,ndays,return_col,w_method,demean = False): # compute per-quantile returns
if w_method =='average':
qreturn = df.groupby(level = 'date', group_keys = True).apply(lambda x: x.groupby(factorname+'_quantile')[[return_col]].mean()/ndays).unstack()
qreturn.columns = [i[1] for i in list(qreturn)]
if w_method == 'factor_weighted':
tmpdf = df.copy()
tmpdf['rwf'] = tmpdf[return_col]*tmpdf[factorname]
tmpdf.dropna(subset = ['rwf'],inplace = True)
qreturn = tmpdf.groupby(level = 'date', group_keys = True).\
apply(lambda x: x.groupby(factorname+'_quantile').\
apply(lambda x: x['rwf'].sum()/x[factorname].sum() if x[factorname].sum()>0 else 0)/ndays)
# qreturn = tmpdf.groupby(level = 'date', group_keys = True).\
# apply(lambda x: x.groupby(factorname+'_quantile').\
# apply(lambda x: (x[return_col]*x[factorname]).sum()/x[factorname].sum())/ndays)
if w_method =='cap_weighted':
qreturn = df.groupby(level = 'date', group_keys = True).\
apply(lambda x: x.groupby(factorname+'_quantile').\
apply(lambda x: (x[return_col]*x['cap']).sum()/x['cap'].sum())/ndays)
if len(qreturn.index.names)==1:
pass
else:
qreturn= qreturn.unstack().apply(lambda x: x.fillna(x.mean()),axis = 1)
if demean:
qreturn = qreturn.apply(lambda x: x-x.mean(),axis = 1)
return qreturn
def one_factor_icir(self,df,factorname,return_col):
from scipy import stats
ic = df.groupby(level = 'date').apply(lambda x: x[[return_col,factorname]].corr('spearman'))
ic_org = ic[ic.index.get_level_values(1) ==return_col][factorname].dropna()
ictable = ic_org.describe()[['count','mean','std','min','max']].copy()
ictable['risk_adj'] = ic_org.mean()/ic_org.std()
ictable['skew'] = ic_org.skew()
ictable['kurtosis'] = ic_org.kurtosis()
if ictable['mean'] <0:
ictable['p-value'] = stats.ttest_1samp(ic_org,0,alternative='less').pvalue
else:
ictable['p-value'] = stats.ttest_1samp(ic_org,0,alternative='greater').pvalue
return ictable
def one_factor_ret_sharp(self,qreturn,ret_freq):
return qreturn.mean()/qreturn.std()*np.sqrt(252/ret_freq)
def factor_prepare(self,allfactors,fc,quantiles,qcut):
test_fc = allfactors[[fc]].copy().rename_axis(['date','symbol'])
res_df = self.one_factor_grouper(test_fc,fc,quantiles,qcut)# ordinal (rank) standardization
return res_df
def factor_ret_test_sheet(self,
weight_method,
index_price, # benchmark prices are indexed by Timestamp
fcname,
res_df,
Price,
days,
savedir,
demean = False):
from alphalens import utils
plottools = plot_tools()
mate_al = mate_alphalens()
tov_df = res_df.groupby(by = fcname+'_quantile').apply(lambda x: self.cal_turnover(x[fcname+'_quantile']/x[fcname+'_quantile'],days))
if len(tov_df.index.names)==1:
to = tov_df.mean(axis =1)
else:
to = tov_df.unstack().mean(axis = 1)
clean_factor,price = mate_al.index_mate(res_df.dropna(),Price)
fwr = utils.compute_forward_returns(price.stack(),price)
clean_factor[str(days)+'D'] = fwr[str(days)+'D']
clean_factor = clean_factor.reset_index()
clean_factor['date'] = clean_factor['date'].astype(str)
clean_factor = clean_factor.set_index(['date','asset']).dropna()
if index_price is not None:
clean_factor = mate_al.trans_ex_return(clean_factor,index_price,ret_col=[str(days)+'D'])
else:
clean_factor[str(days)+'D'] = clean_factor.groupby(level = 'date',group_keys = False).apply(lambda x: x[str(days)+'D']-x[str(days)+'D'].mean())
qreturn = self.one_factor_return(clean_factor,fcname,days,str(days)+'D',w_method = weight_method,demean=demean)
# ic
ic_table = self.one_factor_icir(clean_factor,fcname,str(days)+'D')
indicators = self.judge_material(qreturn,ic_table,days)
plottools.factor_plt(qreturn,to,indicators,fcname,days,savedir)
return qreturn,clean_factor,indicators
def judge_material(self,qreturn,ic_table,days):
from scipy import stats
indicators = ic_table.copy()
maxquantile = max(qreturn.columns)
lsret = qreturn[maxquantile] - qreturn[1]
groupmean = qreturn.mean(axis = 0)
groupmean_diff = groupmean.diff().dropna()
top_half = groupmean_diff.iloc[-5:]
top_sharp = qreturn[maxquantile].mean()/qreturn[maxquantile].std()*pow(252/days,1/2)
t,p_lsret =stats.ttest_1samp(lsret,0,alternative='greater')
t,p_groupmean =stats.ttest_1samp(groupmean_diff,0,alternative='greater')
t,p_tophalfmean = stats.ttest_1samp(top_half,0,alternative='greater')
indicators['TopQtl_SR'] = top_sharp
indicators['LSRet_pvalue'] = p_lsret
indicators['MeanRetDiff_pvalue'] = p_groupmean
indicators['TophalfMeanRetDiff_pvalue'] = p_tophalfmean
return indicators
def efficient_judge(self,indicators):
from scipy import stats
# factor screening
'''
Test: a factor passing both of the checks below is treated as useful.
'''
if indicators['p-value']<= 0.05 and indicators['TopQtl_SR']>=1:
if indicators['TophalfMeanRetDiff_pvalue']<=0.3 and indicators['LSRet_pvalue']<=0.12:
# print(fc+'有用;头部{}组平均收益一阶差分p值{},多空收益p值{},ic_pvalue{},top组超额夏普{}'.format(int(maxquantile/2),p_top_halfmean,p_lsret))
return 1
# 且两个乘起来能跟原来的匹配;且另一个不能太差
# elif indicators['TophalfMeanRetDiff_pvalue']*indicators['LSRet_pvalue']<0.0025 and (indicators['TophalfMeanRetDiff_pvalue']/0.05 <= 0.1 or indicators['LSRet_pvalue']/0.05 <= 0.1) \
# and min(indicators['MeanRetDiff_pvalue'],indicators['TophalfMeanRetDiff_pvalue'])<=0.05 and indicators['TophalfMeanRetDiff_pvalue']<0.3:
# print(fc+'勉强有用;头部{}组平均收益一阶差分p值{},整体平均收益一阶差分p值{},多空收益p值{}'.format(int(maxquantile/2),p_top_halfmean,p_groupmean,p_lsret))
else:
return 2
return 0
def eff_classification(self,fc,indicator,judgefunc,strict_eff,unstrict_eff):
'''
Input:
the factor's test indicators
Output:
1. list of strictly effective factors
2. list of loosely effective factors
'''
# factor screening
if judgefunc(indicator) == 1:
strict_eff.append(fc)
unstrict_eff.append(fc)
elif judgefunc(indicator) == 2:
unstrict_eff.append(fc)
return strict_eff,unstrict_eff
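# Illustrative end-to-end sketch for one factor (assumptions: `allfactors`
# is a (date, symbol) factor frame, `Price` a wide close-price frame,
# `index_price` a benchmark frame as expected by factor_ret_test_sheet and
# './figs/' an existing output folder):
#
#   sst = single_signal_test()
#   res_df = sst.factor_prepare(allfactors, 'mom', quantiles=10, qcut=True)
#   qreturn, clean_factor, indicators = sst.factor_ret_test_sheet(
#       'average', index_price, 'mom', res_df, Price, 5, './figs/')
#   print(sst.efficient_judge(indicators))   # 1 strict, 2 loose, 0 rejected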
class multi_factor_test(object):
def __init__(self) -> None:
self.sst = single_signal_test()
pass
def factors_abnormal_ret(self,factordf,return_col,factorlist,days,index_price = None,pricedf = None,longshort_return = False):
df = factordf.copy()
if pricedf is not None:
# default forward return: next close divided by today's close
df[str(days)+'D'] = pricedf.pct_change(days,fill_method = None).shift(-days).stack()
if index_price is not None:
ml = mate_alphalens()
df,pricedf = ml.index_mate(df,pricedf)
df = ml.trans_ex_return(df,index_price,str(days)+'D')
df = df.rename(columns = {str(days)+'D':return_col+str(days)+'D'}).dropna(subset = return_col+str(days)+'D')
if longshort_return == False:
ret_k = df.groupby(level = 'date',group_keys = False).apply(lambda x: sm.formula.ols(return_col+str(days)+'D'+'~'+'+'.join(factorlist),data = x).fit().params)
del ret_k['Intercept']
else :
lscol = list(factordf)
quantiles = int(df[return_col+str(days)+'D'].groupby(level = 'date').count().mean()//100)
LSretList = []
for col in tqdm(lscol):
tmpdf = df.copy()
tmpdf[col+'_quantile'] = self.sst.one_factor_grouper(df,col,quantiles,False)[col+'_quantile']
qreturn = self.sst.one_factor_return(tmpdf,col,days,'ret{}D'.format(days),'factor_weighted',False)
LSretList.append(qreturn[max(list(qreturn))] - qreturn[1])
ret_k = pd.concat(LSretList,axis = 1)
ret_k.columns = lscol
return ret_k
def multif_barra_norm(self,allfactors,Bft):
df = allfactors.copy()
print('barra neutralization....')
for fcname in tqdm(list(df)):
test_fc = df[[fcname]].copy().rename_axis(['date','symbol'])
residual_ols,params_ols = Bft.barra_compose(test_fc)
df[fcname] = residual_ols # replace the raw factor with its neutralized residual
return df
def multif_industry_norm(self,allfactors,industry_info):
df = allfactors.copy()
df['first_industry_name'] = industry_info
df = df.dropna(subset = 'first_industry_name').groupby(level = 'date',group_keys =False).apply(lambda x: x.groupby(by = 'first_industry_name',group_keys =False).apply(lambda x:x-x.mean(numeric_only=True)))
del df['first_industry_name']
return df
def multif_corr_ana(self,df,factornamelist): # multi-factor correlation analysis
# df:pd.DataFrame
# factornamelist: strlist
# multiindex: timestamp,code
# columns: nday_return, factorname1, factorname2...
df_ana = df[factornamelist].groupby(level = 'date').corr()
corr_mean = df_ana.groupby(level = 1).mean() # the second level of the corr result has no name, so it is referenced by position 1; it holds the factor names
corr_ir = df_ana.groupby(level = 1).mean()/df_ana.groupby(level = 1).std()
return corr_mean.loc[list(corr_mean)],corr_ir.loc[list(corr_ir)]
def multif_pca_ana(self,originalFactor,domain_factor_nums): # multi-factor PCA analysis
# originalFactor: pd.DataFrame
# multiindex: timestamp,code
# columns: factorname1, factorname2...
from sklearn import preprocessing
data = originalFactor.groupby(level = 'date', group_keys = False).apply(lambda x: preprocessing.scale(x))
data = np.vstack(data.values)
from sklearn.decomposition import PCA
pcaModel = PCA(domain_factor_nums)
pcaModel.fit(data)
pcaFactors = pcaModel.transform(data)
pcaFactors = pd.DataFrame(pcaFactors)
pcaFactors.index = originalFactor.index
pcaFactors.columns = ['pca_'+str(i) for i in range(domain_factor_nums)]
return pcaModel.explained_variance_,pcaModel.explained_variance_ratio_,pcaFactors
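# Illustrative sketch: keep the first three principal components of a
# hypothetical (date, symbol) factor panel `factor_panel` and inspect the
# explained variance.
#
#   mft = multi_factor_test()
#   var, var_ratio, pca_factors = mft.multif_pca_ana(factor_panel, 3)
#   print(var_ratio.cumsum())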
def batch_factors_test(self,weight_method,allfactors,Price,quantiles,days,qcut,savedir,index_price = None,demean = False):
returndict = {}
sst = single_signal_test()
for fc in tqdm(list(allfactors)):
res_df = sst.factor_prepare(allfactors,fc,quantiles,qcut)
sst.factor_ret_test_sheet(weight_method,index_price,fc,res_df,Price,days,savedir,demean)
returndict[fc] = res_df[[fc]]
return returndict
def multif_tsstable_test(self,originalData):
# originalFactor: pd.DataFrame
# multiindex: timestamp,code
# columns: factorname1, factorname2...
from statsmodels.tsa.stattools import adfuller
data = originalData.copy()# deliberately NOT re-standardized per date here
mean_pvalue = data.groupby(level = 'date').apply(lambda x:x.mean()).apply(lambda x: adfuller(x)[1])
std_pvalue = data.groupby(level = 'date').apply(lambda x:x.std()).apply(lambda x: adfuller(x)[1])
skew_pvalue = data.groupby(level = 'date').apply(lambda x:x.skew()).apply(lambda x: adfuller(x)[1])
kurt_pvalue = data.groupby(level = 'date').apply(lambda x:x.kurt()).apply(lambda x: adfuller(x)[1])
yarn_pvalue = pd.concat([mean_pvalue,std_pvalue,skew_pvalue,kurt_pvalue],axis = 1)
yarn_pvalue.columns = ['mean','std','skew','kurt']
return yarn_pvalue
def del_updown_limit(self,factordf,daybar,text):
# drop limit-up / limit-down observations
notuplimit = daybar[~(daybar[text] == daybar.limit_up)]
notdownlimit = daybar[~(daybar[text] == daybar.limit_down)]
factordf = factordf[factordf.index.isin(notuplimit.index)]
factordf = factordf[factordf.index.isin(notdownlimit.index)]
return factordf
def in_some_pool(self,df,pool_components):
factordf = df.copy()
factordf['inpool']=pool_components.applymap(lambda x:1)
factordf['inpool'] = factordf['inpool'].apply(lambda x: 1 if x>0 else 0)
testdf = factordf[factordf['inpool']>=1]
del testdf['inpool']
return testdf
def orthog(self,factor_mat, y, xlist):
df = factor_mat.replace([np.inf, -np.inf], np.nan).dropna()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
params = regre.params[~(regre.params.index == 'Intercept')]
intercept = regre.params[(regre.params.index == 'Intercept')]
residual = df[y] - (df[list(params.index)]*params).sum(axis = 1) - intercept.values
residual = pd.DataFrame(residual)
residual.columns = [y]
return self.mat_normlize(residual),params
def mat_orthog(self,factor_mat):
temp1 = factor_mat.replace([np.inf, -np.inf], np.nan).dropna()
for i in list(temp1):
no = list(temp1).index(i)
if no==0:
temp1[i] = self.mat_normlize(temp1[i])
continue
fclist = list(filter(lambda x: x!=i,list(temp1)[:no]))
temp1[i] = self.orthog(temp1,i,fclist)[0]
return temp1
def ts_mat_orthog(self,factor_mat):
return factor_mat.groupby(level = 'date',group_keys = False).apply(self.mat_orthog)
def mat_normlize(self,factor_mat):
df = factor_mat.rename_axis(['date','symbol']).replace([np.inf, -np.inf], np.nan)
def norm(x):
return (x - x.min())/(x.max()-x.min())
return df.groupby(level = 'date',group_keys = False).apply(norm)
def mat_ranknormlize(self,factor_mat):
df = factor_mat.rename_axis(['date','symbol']).replace([np.inf, -np.inf], np.nan)
def norm(x):
x_rank = x.rank()
return (x_rank - x_rank.min())/(x_rank.max()-x_rank.min())
return df.groupby(level = 'date',group_keys = False).apply(norm)
def multindex_shift(self,fcdf):
df = fcdf.reset_index()
datelist = list(df['date'].drop_duplicates())
datedict = dict(zip(datelist[:-1],datelist[1:]))
df['date'] =df['date'].apply(lambda x: datedict[x] if x in datedict.keys() else np.nan)
return df.dropna(subset = 'date').set_index(['date','symbol'])
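# Illustrative note: multindex_shift relabels every date with the next
# available date, so a factor computed on day T is aligned with day T+1;
# rows on the last date are dropped because they have no successor.
#
#   shifted = multi_factor_test().multindex_shift(factor_frame)   # factor_frame is hypothetical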
class Barra_factor_ana(object):
'''
1. growth needs at least 504 days of data; stocks that fall short are dropped when the factors are concatenated
2. barrafactor must have a two-level index: date first, instrument second
'''
def __init__(self,df=None,start_date=None,end_date=None,dir=None,skip_fileload=None) -> None:
# preload the input data
if not skip_fileload:
self.price = df
dailyreturn = df/df.shift(1)-1
dailyreturn.dropna(how = 'all',inplace=True)
self.returndata = dailyreturn
self.start_date = start_date
self.end_date = end_date
import os
filelist = os.listdir(dir)
self.filedict = {}
for f in filelist:
if f[-3:]=='csv':
self.filedict[f[:-4]] = pd.read_csv(dir+f,index_col = [0,1])
pass
def rise_barra_factors(self):
print('rise size')
self.size = np.log(self.filedict['market_cap']).dropna()
def OLSparams(y,x):
print('rise beta')
X_ = x.droplevel('order_book_id')
df = y.copy()
df['market_r'] = X_['r']
df.dropna(subset = 'market_r',inplace = True)
dflist = list(df.rolling(252))[252:]
paramslist = []
for olsdf in dflist:
mod = sm.OLS(olsdf,sm.add_constant(olsdf['market_r']))
re = mod.fit()
params = re.params.T
params.index = olsdf.columns
params = params[params.index!='market_r']
params['date'] = olsdf.index[-1]
params = params.rename(columns = {'market_r':'beta'})
paramslist.append(params)
olsparams = pd.concat(paramslist).set_index('date',append=True).unstack().T
constdf = olsparams.loc['const'].ewm(halflife = 63,ignore_na = True,adjust = False).mean().stack()
betadf = olsparams.loc['beta'].ewm(halflife = 63,ignore_na = True,adjust = False).mean().stack()
# cal residual
mkt_df = pd.concat([X_['r']]*len(list(betadf.unstack())),axis = 1)
mkt_df.columns = list(betadf.unstack())
residual = y - betadf.unstack()*mkt_df - constdf.unstack() # the residual already uses the ewm-smoothed beta and const, so no further smoothing is needed
return {'beta':betadf,'const':constdf,'residual':residual}
def MOMTM(y):
df = np.log(1+y)
momtm = df.ewm(halflife=126,ignore_na = True,adjust = False).mean()#.iloc[-1:]
return momtm
def CMRA(y,T):
date = y.index[-1]
dflist= []
for i in range(1,T+1):
pct_n_month = pd.DataFrame((y/y.shift(21*i)-1).iloc[-1])/21
dflist.append(pct_n_month)
df = pd.concat(dflist,axis =1)
zmax = df.max(axis =1)
zmin = df.min(axis = 1)
cmra = pd.DataFrame(np.log(1+zmax)-np.log(1+zmin),columns = [date]).T
return cmra
def orthog(barrafactor,y,xlist):
df = barrafactor.copy()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
for p in xlist:
df[p]*= regre.params[p]
df[y+'_orth'] = df[y] - df[xlist].sum(axis = 1)-regre.params['Intercept']
return df[[y+'_orth']]
# beta
self.olsparams = OLSparams(self.returndata,self.filedict['market_r'])
self.beta = pd.DataFrame(self.olsparams['beta']).dropna()
self.beta.columns = ['beta']
# momentum
print('rise momentum')
# retroll504 = list(self.returndata.rolling(504))[504:]
# self.momtm = pd.concat(list(map(lambda x: MOMTM(x),retroll504))).shift(21).dropna(how = 'all')
self.momtm = MOMTM(self.returndata).shift(21).dropna(how = 'all')
self.momtm = pd.DataFrame(self.momtm.stack(),columns=['momentum'])
# residual volatility
print('rise residual volatility')
self.hist_volatility = self.returndata.ewm(halflife = 42,ignore_na = True,adjust = False).std().dropna(how = 'all')
CMRAlist = list(self.price.rolling(252))[252:]
self.CMRA = pd.concat(list(map(lambda x: CMRA(x,12),CMRAlist)))
self.Hsigma = self.olsparams['residual'].rolling(252,min_periods = 1).std()
self.residual_volatility = pd.DataFrame((self.hist_volatility*0.74+self.CMRA*0.16+self.Hsigma*0.1).stack()).dropna()
self.residual_volatility.columns = ['residual_volatility']
# non-linear size
print('rise non-linear size')
self.nlsize = (self.size**3).dropna()
self.nlsize.columns = ['nlsize']
# Bp
print('rise Bp')
self.Bp = self.filedict['Bp'].dropna()
# liquidity
print('rise Liquidity')
self.tvrdf = self.filedict['turnover']
self.liq_1m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(22,min_periods =1).mean())
self.liq_3m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(74,min_periods =1).mean())
self.liq_12m = self.tvrdf.groupby(level = 1, group_keys = False).apply(lambda x: x.sort_index().rolling(252,min_periods =1).mean())
self.liq = (0.35*self.liq_1m + 0.35*self.liq_3m + 0.3*self.liq_12m).dropna()
print('rise Earning Yield')
self.earning_yield = pd.concat([self.filedict['Ep'],self.filedict['Sp']],axis = 1)
self.earning_yield['earning_yield'] = self.earning_yield['ep_ratio_ttm']*0.66+self.earning_yield['sp_ratio_ttm']*0.34
self.earning_yield = self.earning_yield[['earning_yield']].dropna()
# growth
print('rise growth')
NP = self.filedict['NPGO'].unstack()
NP = (NP-NP.shift(504))/NP.shift(504).abs().replace(0,np.nan)
NP = NP.stack()
RVN = self.filedict['RGO'].unstack()
RVN = (RVN - RVN.shift(504))/RVN.shift(504).abs().replace(0,np.nan)
RVN = RVN.stack()
self.growth = pd.DataFrame(NP['net_profit_parent_company_ttm_0']*0.34+RVN['revenue_ttm_0']*0.66)
self.growth.columns = ['growth']
self.growth.dropna(inplace=True)
# leverage
print('rise leverage')
self.leverage = self.filedict['MLEV']['du_equity_multiplier_ttm']*0.38+self.filedict['DTOA']['debt_to_asset_ratio_ttm']*0.35+self.filedict['BLEV']['book_leverage_ttm']*0.27
self.leverage = pd.DataFrame(self.leverage)
self.leverage.columns = ['leverage']
self.leverage.dropna(inplace=True)
# concat
self.barrafactor = pd.concat([
self.size,
self.beta,
self.momtm,
self.residual_volatility,
self.nlsize,
self.Bp,
self.liq,
self.earning_yield,
self.growth,
self.leverage],axis = 1).sort_index(level = 0)
'''Regularization (orthogonalization)'''
# The raw, un-orthogonalized factors are already stored as instance attributes and can be used directly
print('Orthogonalizing....')
y = ['residual_volatility','nlsize','turnover']
xlist = ['circulation_A','beta']
# dropna is required here, otherwise the regression raises an error
self.barrafactor[y[0]] = self.barrafactor[[y[0]]+xlist].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[0],xlist))
self.barrafactor[y[1]] = self.barrafactor[[y[1]]+xlist[:1]].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[1],xlist[:1]))
self.barrafactor[y[2]] = self.barrafactor[[y[2]]+xlist[:1]].dropna().groupby(level = 0, group_keys = False).apply(lambda x: orthog(x,y[2],xlist[:1]))
# Standardization
def return_barra_factor(self,rank_normalize:bool):
mft = multi_factor_test()
if rank_normalize:
return mft.mat_ranknormlize(self.barrafactor)
else:
return mft.mat_normlize(self.barrafactor)
def barra_compose(self,factordata):
# The factor values are rank data
decompose = pd.concat([self.barrafactor,factordata],axis = 1).dropna().rename_axis(['date','symbol'])
def orthog(barrafactor,y,xlist):
df = barrafactor.copy()
regre = sm.formula.ols(y+'~'+'+'.join(xlist),data = df).fit()
params = regre.params[~(regre.params.index == 'Intercept')]
intercept = regre.params[(regre.params.index == 'Intercept')]
residual = df[y] - (df[list(params.index)]*params).sum(axis = 1) - intercept.values
return residual,params
# This approach gives wrong results when only a single day is computed
# residual_ols =decompose.groupby(level = 0).apply(lambda x: orthog(x,list(decompose)[-1],list(decompose)[:-1])[0]).droplevel(0)
# params_ols =decompose.groupby(level = 0).apply(lambda x: orthog(x,list(decompose)[-1],list(decompose)[:-1])[1])
# return residual_ols,params_ols
decomposebyday = list(decompose.groupby(level = 'date'))
residual_olslist = []
params_olslist = []
for df in decomposebyday:
x = df[1]
residual_ols,params_ols = orthog(x,list(decompose)[-1],list(decompose)[:-1])
residual_olslist.append(residual_ols)
params_olslist.append(pd.DataFrame(params_ols,columns = [df[0]]).T)
return pd.concat(residual_olslist),pd.concat(params_olslist)
def barra_style_pool(self,style,cutnum):
bystyle = self.barrafactor[[style]].copy()
bystyle[style+'_group'] = bystyle[style].dropna().groupby(level = 0,group_keys=False).apply(lambda x: pd.cut(x,cutnum,labels=list(range(1,cutnum+1))))
return bystyle
def factor_performance_bystyle(self,factordata,factorname,style,cutnum):
# Even if a factor has no exposure tilt toward a style, its performance can still differ across style groups
bystyle = pd.concat([factordata,self.barrafactor[[style]]],axis = 1)
bystyle[style+'_group'] = bystyle[style].dropna().groupby(level = 0,group_keys=False).apply(lambda x: pd.cut(x,cutnum,labels=list(range(1,cutnum+1))))
ic_daily = bystyle.groupby(style+'_group',group_keys=False).apply(lambda x: x[[factorname,'nday_return']].groupby(level = 0).apply(lambda x: x.corr('spearman').iloc[0,1])).T
return ic_daily
class AutoMatic(object):
sst = single_signal_test()
mft = multi_factor_test()
def __init__(self,Bft,base_index,Price,quantiles,days,qcut,demean,weighted_method) -> None:
'''base_index: benchmark price data whose time index is a timestamp'''
self.Bft = Bft
self.Price = Price.copy()
self.base_index = base_index
self.quantiles = quantiles
self.days = days
self.qcut = qcut
self.demean = demean
self.weighted_method = weighted_method
pass
def AutoMatic_DirCheck(self,path):
if not os.path.exists(path):
os.makedirs(path)
def AutoMatic_Direc_Adjust(self,factors,dir_):
neu_factors = factors.copy()
direction_dict = {}
strict_eff = []
unstrict_eff = []
self.AutoMatic_DirCheck(dir_+'direction/')
self.AutoMatic_DirCheck(dir_+'direction/redirection/')
for fc in list(neu_factors):
res_df = self.sst.factor_prepare(neu_factors,fc,self.quantiles,self.qcut)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,tmp,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,fc,res_df,self.Price,self.days,dir_+'/direction/',self.demean)
if qreturn[list(qreturn)[0]].sum()<= qreturn[self.quantiles].sum():
print(fc+' is a positive-direction factor')
direction_dict[fc] = 1
if qreturn[list(qreturn)[0]].sum() > qreturn[self.quantiles].sum():
print(fc+' is a negative-direction factor')
neu_factors = neu_factors.copy()
neu_factors[fc]=self.mft.mat_normlize(-1*neu_factors[fc])
direction_dict[fc] = -1
res_df = self.sst.factor_prepare(neu_factors,fc,self.quantiles,self.qcut)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,tmp,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,fc,res_df,self.Price,self.days,dir_+'direction/redirection/',self.demean)
# effectiveness classification
strict_eff,unstrict_eff = self.sst.eff_classification(fc,indicator,self.sst.efficient_judge,strict_eff,unstrict_eff)
return direction_dict,strict_eff,unstrict_eff
def AutoMatic_Factor_Merge_Ret(self,neu_factors,base_factors,mergename,dir_):
base_f = pd.DataFrame(neu_factors[base_factors].sum(axis = 1),columns = [mergename])
res_df = self.sst.factor_prepare(base_f,mergename,self.quantiles,self.qcut)
self.AutoMatic_DirCheck(dir_)
if self.weighted_method == 'cap_weighted':
res_df['cap'] = self.cap
qreturn,clean_factor,indicator = self.sst.factor_ret_test_sheet(self.weighted_method,self.base_index,mergename,res_df,self.Price,self.days,dir_,self.demean)
return qreturn,clean_factor
def AutoMatic_Compare_Indicator(self,qreturn,reverse):
if reverse:
maxq = min(list(qreturn))
base_perf_sr = -1*qreturn[maxq].mean()/qreturn[maxq].std()
return base_perf_sr
maxq = max(list(qreturn))
base_perf_sr = qreturn[maxq].mean()/qreturn[maxq].std()
return base_perf_sr
def threads_pool_run(self,params_batch):
InSampleFactors,i,dir_ = params_batch[0] , params_batch[1] , params_batch[2]
import matplotlib
matplotlib.use('agg')
savedir = dir_+'{}/'.format(i)
direction_dict,strict_eff,unstrict_eff =self.AutoMatic_Direc_Adjust(InSampleFactors,savedir) # direction adjustment
return (direction_dict,strict_eff,unstrict_eff)
def AutoMatic_Stochastic_Optimizer(self,test_factor,threads_num,dir_):
dateset = list(set(test_factor.index.get_level_values('date')))
import multiprocessing
from multiprocessing import Pool
InSplList = []
for i in range(threads_num):
randomdate = sorted(np.random.choice(dateset,int(len(dateset)/5),replace = False))
InSplList.append((test_factor.loc[randomdate],i,dir_))
pool = Pool(min(multiprocessing.cpu_count(),threads_num))
return pool.map(self.threads_pool_run,InSplList)
def AutoMatic_Perf_InPool(self,neu_factors,base_factors,reverse,save_dir):
qreturn,tmp =self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors,'basef',save_dir+'temp/')
base_perf_sr= self.AutoMatic_Compare_Indicator(qreturn,reverse)
others = list(filter(lambda x: x not in base_factors,list(neu_factors)))
for sf in others:# add each remaining factor and check the performance gain
print(base_factors)
qreturn,tmp =self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors+[sf],sf+'_basef',save_dir+'temp/')
perf_sr = self.AutoMatic_Compare_Indicator(qreturn,reverse)
print('Beats previous best: {}; current excess Sharpe: {}, previous best excess Sharpe: {}'.format(perf_sr > base_perf_sr,perf_sr,base_perf_sr))
if perf_sr > base_perf_sr:
base_factors.append(sf)
base_perf_sr = perf_sr
qreturn,clean_factor = self.AutoMatic_Factor_Merge_Ret(neu_factors,base_factors,'basef',save_dir+'final/')
return qreturn,clean_factor
class plot_tools(object):
def __init__(self) -> None:
import matplotlib
self.plotstatus = matplotlib.get_backend()
pass
def trio_plt(self,qmean,qcum,quantiles): # plot return charts
import matplotlib.pyplot as plt
qmean[list(range(1,quantiles+1))].plot(kind= 'bar',title = 'mean')
plt.show()
qcum[list(range(1,quantiles+1))].plot(title = 'cumreturn')
plt.legend(loc = 'upper center',bbox_to_anchor=(1.1, 1.02))
plt.show()
(qcum[quantiles]-qcum[1]).plot(title = 'long-short')
plt.show()
def fbplot(self,frontplot,bgplot,c,fname,bname):
# frontplot,bgplot:
# pd.Series
# multiindex: timestamp,code
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
tickspace = len(frontplot)//12
fig = plt.figure()
a1=fig.add_axes([0,0,1,1])
a1.bar(frontplot.index,bgplot.loc[frontplot.index],color = c)
a1.tick_params(axis='x', labelrotation= 30)
a1.xaxis.set_major_locator(ticker.MultipleLocator(tickspace))
a2 = a1.twinx()
a2.plot(frontplot.index,frontplot,color = 'red')
a2.tick_params(axis='x', labelrotation= 30)
a2.xaxis.set_major_locator(ticker.MultipleLocator(tickspace))
fig.legend(frameon = False,labels = [bname+'(left)',fname+'(right)'],loc = 'upper center')
plt.show()
def factor_plt(self,qreturn,to,ictable,fc,ndays,savedir=''):
from alphalens import utils
from pandas.plotting import table
numtable = pd.concat([qreturn.mean(),qreturn.sum(),qreturn.mean()/qreturn.std()],axis = 1).rename(columns= {0:'avg',1:'sum',2:'risk-adj'}).T
top_quantile = max(list(qreturn))
totalSeed = qreturn.index
xticks = list(range(0, len(totalSeed), 60))
xlabels = [str(totalSeed[x]) for x in xticks]
import matplotlib.pyplot as plt
plt.figure(dpi=300, figsize=(24, 12))
ax = plt.subplot(321,frame_on=False,title = fc+'_retsheet_bygroup')
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, numtable.round(5), loc='center') # replace with the dataframe to be rendered
ax = plt.subplot(365,frame_on=False,title = str(ndays)+'days_information')
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, ictable.round(5), loc='center') # replace with the dataframe to be rendered
plt.subplot(325,title = fc+'_cumret_bygroup')
plt.plot(qreturn.index,qreturn.cumsum(),label = list(qreturn))
plt.legend()
plt.xticks(rotation=90)
plt.xticks(ticks=xticks, labels=xlabels)
plt.subplot(324,title = fc+'_turnover_bygroup')
plt.bar(to.index,to,color="blue")
plt.subplot(323,title = fc+'_avgret_bygroup')
plt.bar(qreturn.mean().index,qreturn.mean(),color="y")
plt.subplot(326,title = fc+'_lsret_bygroup')
plt.plot(qreturn.index,(qreturn[top_quantile]-qreturn[1]).cumsum(),color="g")
plt.xticks(rotation=90)
plt.xticks(ticks=xticks, labels=xlabels)
try:
os.remove(savedir+fc+'.jpg')
print(fc+'.jpg'+' old file removed')
except OSError:
print(fc+'.jpg'+' is a new file')
plt.savefig(savedir+fc+'.jpg')
if self.plotstatus != 'agg':
plt.show()
plt.close()
# heatmap display
def ShowHeatMap(self,DataFrame,savedir='',triangle = True):
import matplotlib.pyplot as plt
import seaborn as sns
f, ax = plt.subplots(figsize=(35, 15))
ax.set_title('Correlation Heatmap')
# show only half of the matrix; comment out the mask if the full matrix is needed
if triangle:
mask = np.zeros_like(DataFrame)
mask[np.triu_indices_from(mask)] = True # np.triu_indices_from marks the upper triangle
with sns.axes_style("white"):
sns.heatmap(DataFrame,
cmap="YlGnBu",
annot=True,
mask=mask,
)
else :
with sns.axes_style("white"):
sns.heatmap(DataFrame,
cmap="YlGnBu",
annot=True,
)
plt.savefig(savedir)
if self.plotstatus != 'agg':
plt.show()
def combine_imgs_pdf(self,folder_path, pdf_file_path,idstname):
import os
from PIL import Image
"""
合成文件夹下的所有图片为pdf
Args:
folder_path (str): 源文件夹
pdf_file_path (str): 输出路径
"""
files = os.listdir(folder_path)
png_files = []
sources = []
for file in files:
if 'png' in file or 'jpg' in file:
png_files.append(folder_path + file)
png_files.sort()
for file in png_files:
png_file = Image.open(file)
png_file = png_file.convert("RGB")
sources.append(png_file)
sources[0].save(pdf_file_path+'{}.pdf'.format(idstname), "pdf", save_all=True, append_images=sources[1:],quality = 95)
class mate_alphalens(object):
def __init__(self) -> None:
pass
def index_mate(self,factordata,price):
fcdf = factordata.reset_index()
fcdf['date'] = pd.to_datetime(fcdf['date'])
fcdf = fcdf.rename(columns = {'symbol':'asset'}).set_index(['date','asset'])
ptemp = price.copy()
ptemp.index = pd.to_datetime(ptemp.index)
return fcdf,ptemp
def trans_ex_return(self,clean_factor,index_price,ret_col):
from alphalens import utils
index_price['factor'] = 1
base_ret = utils.compute_forward_returns(index_price[['factor']],index_price['close'].unstack())
base_ret = base_ret.droplevel('asset').reindex(clean_factor.index.get_level_values(0))
base_ret['asset'] = clean_factor.index.get_level_values('asset')
base_ret = base_ret.set_index(['asset'],append=True)
df = clean_factor.copy()
df[ret_col]= df[ret_col]-base_ret[ret_col]
return df
class alert(object):
def __init__(self,**file):
if file:
self.filename = file
else:
import sys
self.filename = sys.argv[0]
pass
def finish_alert(self):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# 1. Connect to the mail server
con = smtplib.SMTP_SSL('smtp.qq.com', 465)
# 2. Log in to the mailbox
con.login('[email protected]', 'jwtjvrktevlobiag')
# 3. Prepare the message
# Create the mail object
msg = MIMEMultipart()
# Set the subject
subject = Header('{} finished running'.format(self.filename), 'utf-8').encode()
msg['Subject'] = subject
# Set the sender
msg['From'] = '[email protected]'
# Set the recipient
msg['To'] = '[email protected]'
# Add the text body
text = MIMEText('{} finished running'.format(self.filename), 'plain', 'utf-8')
msg.attach(text)
# 4. Send the mail
con.sendmail('[email protected]', '[email protected]', msg.as_string())
con.quit()
def breakdown_alert(self):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# 1. Connect to the mail server
con = smtplib.SMTP_SSL('smtp.qq.com', 465)
# 2. Log in to the mailbox
con.login('[email protected]', 'jwtjvrktevlobiag')
# 3. Prepare the message
# Create the mail object
msg = MIMEMultipart()
# Set the subject
subject = Header('{} failed to run'.format(self.filename), 'utf-8').encode()
msg['Subject'] = subject
# Set the sender
msg['From'] = '[email protected]'
# Set the recipient
msg['To'] = '[email protected]'
# Add the text body
text = MIMEText('{} failed to run'.format(self.filename), 'plain', 'utf-8')
msg.attach(text)
# 4. Send the mail
con.sendmail('[email protected]', '[email protected]', msg.as_string())
con.quit()
# sst:
# def noise_judge(self,qreturn,fc):
# from scipy import stats
# from statsmodels.stats.diagnostic import acorr_ljungbox
# # Factor classification
# lsret,groupmean,groupmean_diff,top_sharp = self.judge_material(qreturn,fc)
# '''
# Test:
# For factors that fail both tests and may be noise, run an autocorrelation test; since the null hypothesis is that autocorrelation exists, even slight autocorrelation (e.g. a repeating 1-2-3-1-2-3 pattern) may not be rejected, so factors that are rejected can essentially be regarded as noise
# '''
# t,p_lsret =stats.ttest_1samp(lsret,0,alternative='greater')
# t,p_groupmean = stats.ttest_1samp(groupmean_diff,0,alternative='greater')
# if p_groupmean>0.05 and p_lsret>0.05:
# print(fc+' may be noise; p-value of first difference of group mean returns {}, p-value of long-short returns {}'.format(p_groupmean,p_lsret))
# # ls_ljung = acorr_ljungbox(lsret.cumsum(), lags=[1,5,10,20])
# gmdf_ljung = acorr_ljungbox(groupmean, lags=[1,5])
# if gmdf_ljung['lb_pvalue'].min()>=0.05:
# print(fc+' is noise; minimum p-value of the autocorrelation test on group mean returns {}'.format(gmdf_ljung['lb_pvalue'].min()))
# return True
# else:
# print('Cannot conclude that '+fc+' is noise; minimum p-value of the autocorrelation test on group mean returns {}'.format(gmdf_ljung['lb_pvalue'].min()))
# return False
# def short_judge(self,qreturn,fc):
# from scipy import stats
# # Factor classification
# lsret,groupmean,groupmean_diff = self.judge_material(qreturn,fc)
# '''
# Test:
# Factors that pass both tests are considered useful
# '''
# maxquantile = max(list(lsret))
# top5 = groupmean_diff.iloc[-5:]
# bottom5 = groupmean_diff.iloc[:5]
# t,p_top5 = stats.ttest_1samp(top5,0,alternative='greater')
# t,p_bottom5 = stats.ttest_1samp(bottom5,0,alternative='greater')
# if p_top5>0.5 and p_bottom5<0.1 and (abs(groupmean.iloc[-1])<abs(groupmean.iloc[0])):
# print(fc+' is a short-side factor; p-value of first difference of top-5 group mean returns {}, p-value of first difference of bottom-5 group mean returns {}'.format(p_top5,p_bottom5))
# return True
# return False
# mft:
# def multif_denoisies(self,noise_factors_list,allfactors,threshold):
# '''
# Input:
# factor matrix, noise factors
# Output:
# denoised factors
# '''
# if len(noise_factors_list)==0:
# print('No noise factors available for denoising')
# return allfactors
# other_factors_df = allfactors[list(filter(lambda x: x not in noise_factors_list,list(allfactors)))]
# noise_factors_df = self.ts_mat_orthog(allfactors[noise_factors_list])
# factordf = pd.concat([other_factors_df,noise_factors_df],axis = 1)
# # Denoise
# other_factors = list(other_factors_df)
# corrdf = self.multif_corr_ana(factordf,list(factordf))[0]
# print('Correlation details:')
# print(corrdf)
# corrdf = corrdf.loc[other_factors,noise_factors_list].abs().max(axis = 1)
# print('Factors to be denoised:')
# corr_with_noise = list(corrdf[corrdf>=threshold].index)
# print(corr_with_noise)
# for fc in corr_with_noise:
# factordf[fc] = self.orthog(factordf, fc, noise_factors_list)[0]
# return factordf[other_factors]
# def multif_cal_weight(self,factordf,factorlist,return_col,weight_type):
# # factordf: pd.DataFrame
# # multiindex: timestamp,code
# # columns: factorname1, factorname2...,returndata
# # factorlist: strlist
# # return_col: column name, str
# df = factordf.copy()
# ret_k = self.fators_abnormal_ret(df,return_col,factorlist)
# ic = df.groupby(level = 'date').apply(lambda x: x.corr(method= 'spearman')[return_col])
# del ic['ret']
# weight = ret_k*ic
# direc = ic.mean().apply(lambda x: 1 if x>0 else -1)
# if weight_type == 1:
# return weight.mean()/weight.std()*direc
# elif weight_type == 2:
# return weight.mean()*direc
# else:
# return direc
# # if weight_type == 'risk parity weighting':
# # cov = weight[factorlist].cov()
# # from scipy.optimize import minimize
# # def objective(x):
# # w_cov = np.dot(cov,x.T)
# # for n in range(len(x)):
# # w_cov[n] *= x[n]
# # mat = np.array([w_cov]*len(x))
# # scale = 1/sum(abs(mat))
# # return np.sum(abs(scale*(mat-mat.T)))
# # initial_w=np.array([0.2]*len(factorlist))
# # cons = []
# # cons.append({'type':'eq','fun':lambda x: sum(x)-1})
# # for i in range(len(initial_w)):
# # cons.append({'type':'ineq','fun':lambda x: x[i]})
# # # result
# # res=minimize(objective,initial_w,method='SLSQP',constraints=cons)
# # params = pd.Series(res.x)
# # params.index = cov.index
# # return params
# def weighted_factor(self,factordf,weight):
# # factordf: pd.DataFrame
# # multiindex: timestamp,code
# # columns: factorname1, factorname2...
# # weight:pd.Series
# wf = (weight*factordf).sum(axis = 1)
# return pd.DataFrame(wf,columns = ['weighted_factor'])
|
Alpha-Rabbit
|
/Alpha_Rabbit-1.4.17.tar.gz/Alpha_Rabbit-1.4.17/Alpha_Rabbit/Alpha_Rabbit.py
|
Alpha_Rabbit.py
|
from Alpha_Rabbit.Factor_Def_and_Get_Method import *
_method = Factor_get_method()
def Factor_Calculator(pricebyday,minbar,conn,todaydate,notst,factors_to_cal):
######################################## Factors computed from daily bars ####################################
uploadfactordict = {}
Close = pricebyday['close'].unstack().sort_index()
Open = pricebyday['open'].unstack().sort_index()
High = pricebyday['high'].unstack().sort_index()
Low = pricebyday['low'].unstack().sort_index()
volume = pricebyday[['volume']].pivot_table(index = 'date',columns = 'symbol',values = 'volume').sort_index()
total_turnover = pricebyday[['total_turnover']].pivot_table(index = 'date',columns = 'symbol',values = 'total_turnover').sort_index()
tovr_r = pricebyday['turnover_ratio'].unstack().sort_index()
Close_ret = Close.pct_change()
tempClose = Close.iloc[-30:]
tempOpen = Open.iloc[-30:]
tempHigh = High.iloc[-30:]
tempLow = Low.iloc[-30:]
annual_close = Close.iloc[-272:]
annual_High = High.iloc[-272:]
if 'mmt_intraday_M' in factors_to_cal or factors_to_cal == 'all':
# 1-month intraday momentum
uploadfactordict['mmt_intraday_M'] = mmt_intraday_M(tempClose,tempOpen)
if 'mmt_range_M' in factors_to_cal or factors_to_cal == 'all':
# 1-month range-adjusted momentum
uploadfactordict['mmt_range_M'] = mmt_range_M(tempHigh,tempLow,tempClose)
if 'mmt_overnight_M' in factors_to_cal or factors_to_cal == 'all':
# overnight momentum
uploadfactordict['mmt_overnight_M'] = mmt_overnight_M(tempOpen,tempClose)
if 'mmt_route_M' in factors_to_cal or factors_to_cal == 'all':
# path-adjusted momentum
uploadfactordict['mmt_route_M'] = mmt_route_M(tempClose)
if 'mmt_discrete_M' in factors_to_cal or factors_to_cal == 'all':
# information-discreteness momentum
uploadfactordict['mmt_discrete_M'] = mmt_discrete_M(tempClose)
if 'mmt_sec_rank_M' in factors_to_cal or factors_to_cal == 'all':
# cross-sectional rank momentum
uploadfactordict['mmt_sec_rank_M'] = mmt_sec_rank_M(tempClose)
if 'mmt_time_rank_M' in factors_to_cal or factors_to_cal == 'all':
# time-series rank score
uploadfactordict['mmt_time_rank_M'] = mmt_time_rank_M(annual_close)
if 'mmt_highest_days_A' in factors_to_cal or factors_to_cal == 'all':
# days since the highest price (annual window)
uploadfactordict['mmt_highest_days_A'] = mmt_highest_days_A(annual_High)
if 'volumestable' in factors_to_cal or factors_to_cal == 'all':
# volume stability
uploadfactordict['volumestable'] = volumestable(volume)
if '_con' in factors_to_cal or factors_to_cal == 'all':
# return consistency factor
uploadfactordict['_con'] = re_con(tempClose)
if 'bofu_money' in factors_to_cal or factors_to_cal == 'all':
# price range / turnover value
uploadfactordict['bofu_money'] = bofu_money(tempHigh,tempLow,tempOpen,total_turnover)
if 'vol_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month return volatility
uploadfactordict['vol_std_1M'] = vol_std(Close_ret,'1M',30)
if 'vol_up_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upside return volatility
uploadfactordict['vol_up_std_1M'] = vol_up_std(Close_ret,'1M',30)
if 'vol_down_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month downside return volatility
uploadfactordict['vol_down_std_1M'] = vol_down_std(Close_ret,'1M',30)
if 'vol_updown_ratio_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upside-to-downside volatility ratio
uploadfactordict['vol_updown_ratio_1M'] = vol_updown_ratio(Close_ret,'1M',30)
if 'vol_highlow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average high-low range
uploadfactordict['vol_highlow_avg_1M'] = vol_highlow_avg(High,Low,'1M',30)
if 'vol_highlow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month high-low range volatility
uploadfactordict['vol_highlow_std_1M'] = vol_highlow_std(High,Low,'1M',30)
if 'vol_highlow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month high-low range stability
uploadfactordict['vol_highlow_stable_1M'] = vol_highlow_stable(High,Low,'1M',30)
if 'vol_upshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average upper shadow
uploadfactordict['vol_upshadow_avg_1M'] = vol_upshadow_avg(High,Open,Close,'1M',30)
if 'vol_upshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upper-shadow volatility
uploadfactordict['vol_upshadow_std_1M'] = vol_upshadow_std(High,Open,Close,'1M',30)
if 'vol_upshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month upper-shadow stability
uploadfactordict['vol_upshadow_stable_1M'] = vol_upshadow_stable(High,Open,Close,'1M',30)
if 'vol_downshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average lower shadow
uploadfactordict['vol_downshadow_avg_1M'] = vol_downshadow_avg(Low,Open,Close,'1M',30)
if 'vol_downshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month lower-shadow volatility
uploadfactordict['vol_downshadow_std_1M'] = vol_downshadow_std(Low,Open,Close,'1M',30)
if 'vol_downshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month lower-shadow stability
uploadfactordict['vol_downshadow_stable_1M'] = vol_downshadow_stable(Low,Open,Close,'1M',30)
if 'vol_w_upshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average Williams upper shadow
uploadfactordict['vol_w_upshadow_avg_1M'] = vol_w_upshadow_avg(High,Close,'1M',30)
if 'vol_w_upshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams upper-shadow volatility
uploadfactordict['vol_w_upshadow_std_1M'] = vol_w_upshadow_std(High,Close,'1M',30)
if 'vol_w_upshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams upper-shadow stability
uploadfactordict['vol_w_upshadow_stable_1M'] = vol_w_upshadow_stable(High,Close,'1M',30)
if 'vol_w_downshadow_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average Williams lower shadow
uploadfactordict['vol_w_downshadow_avg_1M'] = vol_w_downshadow_avg(Low,Close,'1M',30)
if 'vol_w_downshadow_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams lower-shadow volatility
uploadfactordict['vol_w_downshadow_std_1M'] = vol_w_downshadow_std(Low,Close,'1M',30)
if 'vol_w_downshadow_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Williams lower-shadow stability
uploadfactordict['vol_w_downshadow_stable_1M'] = vol_w_downshadow_stable(Low,Close,'1M',30)
if 'liq_turn_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average turnover
uploadfactordict['liq_turn_avg_1M'] = liq_turn_avg(tovr_r,'1M',30)
if 'liq_turn_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month turnover variance
uploadfactordict['liq_turn_std_1M'] = liq_turn_std(tovr_r,'1M',30)
if 'liq_vstd_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month turnover-to-volatility ratio
uploadfactordict['liq_vstd_1M'] = liq_vstd(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average Amihud illiquidity
uploadfactordict['liq_amihud_avg_1M'] = liq_amihud_avg(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Amihud illiquidity volatility
uploadfactordict['liq_amihud_std_1M'] = liq_amihud_std(tovr_r,Close_ret,'1M',30)
if 'liq_amihud_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month Amihud illiquidity stability
uploadfactordict['liq_amihud_stable_1M'] = liq_amihud_stable(tovr_r,Close_ret,'1M',30)
if 'liq_shortcut_avg_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month average shortest-path illiquidity
uploadfactordict['liq_shortcut_avg_1M'] = liq_shortcut_avg(tovr_r,High,Low,Open,Close,'1M',30)
if 'liq_shortcut_std_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month shortest-path illiquidity volatility
uploadfactordict['liq_shortcut_std_1M'] = liq_shortcut_std(tovr_r,High,Low,Open,Close,'1M',30)
if 'liq_shortcut_stable_1M' in factors_to_cal or factors_to_cal == 'all':
# 1-month shortest-path illiquidity stability
uploadfactordict['liq_shortcut_stable_1M'] = liq_shortcut_stable(tovr_r,High,Low,Open,Close,'1M',30)
if 'PLUS' in factors_to_cal or factors_to_cal == 'all':
# upper-minus-lower shadow difference
uploadfactordict['PLUS'] = PLUS(tempClose,tempHigh,tempLow)
if 'liq_std_w_plus_1M' in factors_to_cal or factors_to_cal == 'all':
# shadow difference times turnover volatility
uploadfactordict['liq_std_w_plus_1M'] = liq_std_w_plus(tempClose,tempHigh,tempLow, tovr_r,'1M',30)
if 'HL_Sprd' in factors_to_cal or factors_to_cal == 'all':
# ideal-range factor
uploadfactordict['HL_Sprd'] = HL_Sprd(Close,High,Low,20)
if 'tvr_std_1M' in factors_to_cal or factors_to_cal == 'all':
# turnover-rate stability
uploadfactordict['tvr_std_1M'] = tvr_std(tovr_r,'1M',20)
if 'corr_price_turn_1M' in factors_to_cal or factors_to_cal == 'all':
# price-turnover correlation factor
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-20:]
uploadfactordict['corr_price_turn_1M'] = corr_price_turn(timerange,pricebyday,'1M')
if 'corr_ret_turn_post_1M' in factors_to_cal or factors_to_cal == 'all':
# return-turnover correlation factor
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-21:] # one extra day is needed for the pct_change calculation
uploadfactordict['corr_ret_turn_post_1M'] = corr_ret_turn_post(timerange,pricebyday,'1M')
if 'corr_ret_turnd_1M' in factors_to_cal or factors_to_cal == 'all':
# correlation between returns and turnover changes
timerange = sorted(list(pricebyday.index.get_level_values('date').drop_duplicates()))[-21:]
uploadfactordict['corr_ret_turnd_1M'] = corr_ret_turnd(timerange,pricebyday,'1M')
######################################## Factors computed from intraday bars ####################################
# # factors based on single-trade turnover amount
sing_trade_amt = pd.DataFrame(minbar['total_turnover']/minbar['num_trades'],columns= ['single_trade_amt'])
sing_trade_amt = sing_trade_amt[sing_trade_amt['single_trade_amt']>0]
sing_trade_amt['trading_date'] = todaydate
sta_del_extrm = sing_trade_amt.groupby(level = 0).apply(lambda x: x.sort_values('single_trade_amt').iloc[:-10]).droplevel(0)# drop the 10 largest values
sta_50pct = sing_trade_amt.groupby(level = 0).\
apply(lambda x: x[x['single_trade_amt']<x['single_trade_amt'].quantile(0.5)]).droplevel(0)# keep the bottom 50%
if 'mts' in factors_to_cal or factors_to_cal == 'all':
# main-force trading strength
uploadfactordict['mts'] = mts(sta_del_extrm,minbar,todaydate)
if 'mte' in factors_to_cal or factors_to_cal == 'all':
# main-force trading sentiment
uploadfactordict['mte'] = mte(sta_del_extrm,minbar,todaydate)
if 'qua' in factors_to_cal or factors_to_cal == 'all':
# quantile factor (qua)
uploadfactordict['qua'] = qua(sta_del_extrm,todaydate)
if 'qua20m' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('qua',notst.index[-20],conn)
q = qua(sta_del_extrm,todaydate)
qua20m = pd.concat([prv_factor,q]).unstack().rolling(20,min_periods=1).mean().iloc[-1:].stack().rename(columns = {'qua':'qua20m'})
uploadfactordict['qua20m'] = qua20m
if 'skew' in factors_to_cal or factors_to_cal == 'all':
# skewness factor (skew)
uploadfactordict['skew'] = skew(sta_50pct,todaydate)
if 'skew20m' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('skew',notst.index[-20],conn)
sk = skew(sta_50pct,todaydate)
skew20m = pd.concat([prv_factor,sk]).unstack().rolling(20,min_periods=1).mean().iloc[-1:].stack().rename(columns = {'skew':'skew20m'})
uploadfactordict['skew20m'] = skew20m
if 's_reverse' in factors_to_cal or factors_to_cal == 'all':
# strong reversal factor
uploadfactordict['s_reverse'] = s_reverse(sing_trade_amt,minbar,todaydate)
if 's_reverse_10_sum' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('s_reverse',notst.index[-10],conn)
sr = s_reverse(sing_trade_amt,minbar,todaydate)
s_reverse_10_sum = pd.concat([prv_factor,sr]).unstack().rolling(10,min_periods=1).sum().iloc[-1:].stack().rename(columns = {'s_reverse':'s_reverse_10_sum'})
uploadfactordict['s_reverse_10_sum'] = s_reverse_10_sum
if 'daily_sta_90pct' in factors_to_cal or factors_to_cal == 'all':
# ideal reversal factor
uploadfactordict['daily_sta_90pct'] = daily_sta_90pct(sta_del_extrm)
if 'ideal_reverse' in factors_to_cal or factors_to_cal == 'all':
prv_factor = _method.get_prev_days_factor_by_name('daily_sta_90pct',notst.index[-20],conn)
dsta90 = daily_sta_90pct(sing_trade_amt)
daily_sta_cal = pd.concat([prv_factor,dsta90])
uploadfactordict['ideal_reverse'] = ideal_reverse(daily_sta_cal,Close)
return uploadfactordict
|
Alpha-Rabbit
|
/Alpha_Rabbit-1.4.17.tar.gz/Alpha_Rabbit-1.4.17/Alpha_Rabbit/Factor_Calculator.py
|
Factor_Calculator.py
|
# Alpha-Vantage-API-Data - Financial Data Retrieval
Alpha-Vantage-API-Data is a package for collecting stock prices and
related company information.
Notice: please visit https://www.alphavantage.co/support/#api-key to get your own
free API key.
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install Alpha-Vantage-API-Data.
```bash
pip install Alpha-Vantage-API-Data
```
## Example
```python
from Alpha_Vantage_API_Data import API_Data_Retrieves
# returns stock daily result
StockTimeSeries = API_Data_Retrieves.StockTimeSeries(apikey)
daily_data = StockTimeSeries.GetDailyStockPrice("IBM")
print(daily_data)
# returns company information
FundamentalData = API_Data_Retrieves.FundamentalData(apikey)
stock_company_info = FundamentalData.CompanyInfo("AAPL")
print(stock_company_info)
```
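The other endpoints follow the same pattern; for example, intraday bars can be pulled at a chosen interval (a short sketch using the `GetIntradayStockPrice` method defined in this package's `API_Data_Retrieves` module; substitute your own `apikey`):
```python
from Alpha_Vantage_API_Data import API_Data_Retrieves

# returns 5-minute intraday bars for IBM
StockTimeSeries = API_Data_Retrieves.StockTimeSeries(apikey)
intraday_data = StockTimeSeries.GetIntradayStockPrice("IBM", "5min")
print(intraday_data)
```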
Go to [Alpha_Vantage_API_Project Issues Page](https://github.com/codemakerss/Alpha_Vantage_API_Project/issues) to report any issues.
## Other
For more information, please visit [Alpha Vantage Official Page](https://www.alphavantage.co) and their
[documents](https://www.alphavantage.co/documentation/).
## License
[MIT](https://choosealicense.com/licenses/mit/)
|
Alpha-Vantage-API-Data
|
/Alpha_Vantage_API_Data-1.0.3.tar.gz/Alpha_Vantage_API_Data-1.0.3/README.md
|
README.md
|
import sys
# Do not show error traceback
sys.tracebacklimit=0
# Check if all packages installed
try:
from pandas.core.frame import DataFrame
import pandas as pd
except ImportError as e:
print("Package <pandas> needed to be installed before getting data ! ")
raise e
try:
import requests
except ImportError as e:
print("Package <requests> needed to be installed before getting data ! ")
raise e
try:
import xlwt
except ImportError as e:
print("Package <xlwt> needed to be installed before getting data ! ")
raise e
try:
import csv
except ImportError as e:
print("Package <csv> needed to be installed before getting data ! ")
raise e
class StockTimeSeries(object):
def __init__(self, apikey : str):
self.apikey = apikey
# Get Stock Information
# daily stock price
def GetDailyStockPrice(self, stock_id : str) -> DataFrame:
"""return DataFrame type daily stock price
The results will show daily stock price
Parameters
----------
stock_id : str
Choose the stock you want to get daily data
"""
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_DAILY", "symbol": stock_id, "outputsize": "full","apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
daily_data = data["Time Series (Daily)"]
for dict in daily_data:
small_dict = daily_data[dict]
small_dict["Datetime"] = dict
df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. volume"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. volume" : "Volume"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
# weekly stock price
def GetWeeklyStockPrice(self, stock_id : str) -> DataFrame:
"""return DataFrame type weekly stock price
The results will show weekly stock price
Parameters
----------
stock_id : str
Choose the stock you want to get weekly data
"""
# https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY_ADJUSTED&symbol=IBM&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_WEEKLY_ADJUSTED", "symbol": stock_id,"apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
daily_data = data["Weekly Adjusted Time Series"]
for dict in daily_data:
small_dict = daily_data[dict]
small_dict["Datetime"] = dict
df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. adjusted close", "6. volume", "7. dividend amount"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. adjusted close" : "Adjusted Close", "6. volume" : "Volume", "7. dividend amount" : "Dividend Amount"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
# monthly stock price
def GetMonthlyStockPrice(self, stock_id : str) -> DataFrame:
"""return DataFrame type monthly stock price
The results will show monthly stock price
Parameters
----------
stock_id : str
Choose the stock you want to get monthly data
"""
# https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY_ADJUSTED&symbol=IBM&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_MONTHLY_ADJUSTED", "symbol": stock_id,"apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
daily_data = data["Monthly Adjusted Time Series"]
for dict in daily_data:
small_dict = daily_data[dict]
small_dict["Datetime"] = dict
df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. adjusted close", "6. volume", "7. dividend amount"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. adjusted close" : "Adjusted Close", "6. volume" : "Volume", "7. dividend amount" : "Dividend Amount"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
# intraday stock price - most recent 1 to 2 months data
def GetIntradayStockPrice(self, stock_id : str, interval : str) -> DataFrame:
"""return DataFrame type intraday stock price
The results will show intraday stock price at certain
interval you choose
Parameters
----------
stock_id : str
Choose the stock you want to get intraday data
interval : str
Choose "1min" or "5min" or "15min" or "30min" or "60min" at time interval for intraday data
"""
# https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=IBM&interval=5min&outputsize=full&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "TIME_SERIES_INTRADAY", "symbol": stock_id, "interval": interval, "outputsize": "full", "apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
ts_str = "Time Series (" + interval + ")"
daily_data = data[ts_str]
for dict in daily_data:
small_dict = daily_data[dict]
small_dict["Datetime"] = dict
df = df.append(pd.DataFrame([small_dict]))
df_new = df[["Datetime", "1. open", "2. high", "3. low", "4. close", "5. volume"]]
df_new = df_new.rename(columns = {"1. open" : "Open", "2. high": "High", "3. low" : "Low", "4. close" : "Close", "5. volume" : "Volume"})
col_name = df_new.columns.tolist()
col_name.insert(0,"Symbol")
df_new = df_new.reindex(columns=col_name)
df_new["Symbol"] = stock_id
return df_new
# Symbol, Name, Type, Region, MarketOpen, MarketClose, Timezone, Currency, MatchScore
def GetSearchEndpoint(self, find_stock : str) -> DataFrame:
# https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords=tesco&apikey=demo
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {"function": "SYMBOL_SEARCH", "keywords": find_stock, "apikey": self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
data = data["bestMatches"]
for dict in data:
df = df.append(pd.DataFrame([dict]))
df_new = df.loc[:,["1. symbol", "2. name", "3. type", "4. region", "5. marketOpen", "6. marketClose", "7. timezone", "8. currency", "9. matchScore"]]
df_new = df_new.rename(columns= {"1. symbol" : 'Symbol', "2. name" : 'Name', "3. type" : 'Type', "4. region" : 'Region', "5. marketOpen" : 'MarketOpen', "6. marketClose" : 'MarketClose', "7. timezone" : 'Timezone', "8. currency" : 'Currency', "9. matchScore" : 'MatchScore'})
return df_new
class FundamentalData(object):
def __init__(self, apikey : str):
self.apikey = apikey
# Company Information
# Currency, GrossProfit in last 5 years - from 2016/12/31 to 2020/12/31, Total Revenue, NetIncome
def GetIncomeStatement(self, stock_id : str) -> DataFrame:
"""return DataFrame type stock income statement
The results will show stock annual and quarterly income statement
Parameters
----------
stock_id : str
Choose the stock you want to get income statement
"""
base_url = 'https://www.alphavantage.co/query?'
df = pd.DataFrame()
df_new = pd.DataFrame()
params = {'function': 'INCOME_STATEMENT', 'symbol': stock_id, 'apikey': self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
data_annual = data['annualReports']
for dict in data_annual:
df = df.append(pd.DataFrame([dict]))
df_new = df.loc[:,['fiscalDateEnding','reportedCurrency','grossProfit', 'totalRevenue', 'netIncome']]
col_name = df_new.columns.tolist()
col_name.insert(0,'Symbol')
df_new = df_new.reindex(columns=col_name)
df_new['Symbol'] = stock_id
return df_new
def GetIncomeStatement_Original(self, stock_id : str) -> DataFrame:
"""return DataFrame type stock income statement
The results will show stock annual and quarterly income statement
Parameters
----------
stock_id : str
Choose the stock you want to get income statement
"""
base_url = 'https://www.alphavantage.co/query?'
df_annual = pd.DataFrame()
df_quarterly = pd.DataFrame()
params = {'function': 'INCOME_STATEMENT', 'symbol': stock_id, 'apikey': self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
data_annual = data['annualReports']
data_quarterly = data['quarterlyReports']
for dict_1 in data_annual:
df_annual = df_annual.append(pd.DataFrame([dict_1]))
col_name = df_annual.columns.tolist()
col_name.insert(0,'Symbol')
df_annual = df_annual.reindex(columns=col_name)
df_annual['Symbol'] = stock_id
for dict_2 in data_quarterly:
df_quarterly = df_quarterly.append(pd.DataFrame([dict_2]))
col_name = df_quarterly.columns.tolist()
col_name.insert(0,'Symbol')
df_quarterly = df_quarterly.reindex(columns=col_name)
df_quarterly['Symbol'] = stock_id
return df_annual, df_quarterly
# Symbol, Name, Exchange, Country, Sector, Industry, Fiscal year end, 52 Week high, 52 Week low, 50DayMovingAverage, 200DayMovingAverage,
def GetCompanyOverview(self, stock_id : str) -> DataFrame:
"""return DataFrame type stock company overview
The results will show stock company overview
Parameters
----------
stock_id : str
Choose the stock you want to get company overview
"""
base_url = 'https://www.alphavantage.co/query?'
df_new = pd.DataFrame()
params = {'function': 'OVERVIEW', 'symbol': stock_id, 'apikey': self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
df = pd.DataFrame([data])
df_new = df.loc[:,['Symbol', 'Name','Exchange','Country', 'Sector', 'Industry', 'FiscalYearEnd', '52WeekHigh', '52WeekLow','50DayMovingAverage', '200DayMovingAverage']]
return df_new
def GetCompanyOverview_Original(self, stock_id : str) -> DataFrame:
"""return DataFrame type stock company overview
The results will show stock company overview
Parameters
----------
stock_id : str
Choose the stock you want to get company overview
"""
base_url = 'https://www.alphavantage.co/query?'
df_new = pd.DataFrame()
params = {'function': 'OVERVIEW', 'symbol': stock_id, 'apikey': self.apikey}
response = requests.get(base_url, params=params)
data = response.json() # dict
df = pd.DataFrame([data])
return df
# Symbol, Name, Exchange, AssetType, IPO Date, Delisting Date, Status
# This is the new version of function
def GetListingDelistingStatus(self) -> DataFrame:
"""return DataFrame type listing and delisting information
The results will show stock listing and delisting information
"""
CSV_URL ='https://www.alphavantage.co/query?function=LISTING_STATUS&apikey=' + self.apikey
r = requests.get(CSV_URL)
decoded_content = r.content.decode('utf-8')
df = pd.DataFrame()
for i in decoded_content.splitlines():
data_list = i.split(',')
df = df.append(pd.DataFrame(data_list).T, ignore_index=True)
df = df.rename(columns=df.iloc[0])
df = df.drop(df.index[0])
df.loc[(df["delistingDate"] == "null"), "delistingDate"] = "1970-01-01"
return df
# Symbol, Name, Exchange, AssetType, IPO Date, Delisting Date, Status
# This is the old version of function
def GetListingDelistingStatus_Original(self) -> DataFrame:
"""return DataFrame type listing and delisting information
The results will show stock listing and delisting information
"""
CSV_URL ='https://www.alphavantage.co/query?function=LISTING_STATUS&apikey=' + self.apikey
data_lst = []
with requests.Session() as s:
download = s.get(CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
data_lst.append(row)
df = pd.DataFrame(columns=data_lst[0], data = data_lst[1:])
return df
# 'symbol', 'name', 'ipoDate', 'priceRangeLow', 'priceRangeHigh', 'currency', 'exchange'
# This is the new version
def FindIPOCalender(self) -> DataFrame:
"""Return DataFrame type of incoming IPO stocks
This will show incoming listing stocks in next few days
"""
CSV_URL = 'https://www.alphavantage.co/query?function=IPO_CALENDAR&apikey=' + self.apikey
r = requests.get(CSV_URL)
decoded_content = r.content.decode('utf-8')
df = pd.DataFrame()
for i in decoded_content.splitlines():
data_list = i.split(',')
df = df.append(pd.DataFrame(data_list).T, ignore_index=True)
df = df.rename(columns=df.iloc[0])
df = df.drop(df.index[0])
return df
# Find IPO companies in the next three months
# 'symbol', 'name', 'ipoDate', 'priceRangeLow', 'priceRangeHigh', 'currency', 'exchange'
# This is the old version
def FindIPOCalender_Original(self) -> DataFrame:
"""Return DataFrame type of incoming IPO stocks
This will show incoming listing stocks in next few days
"""
CSV_URL = 'https://www.alphavantage.co/query?function=IPO_CALENDAR&apikey=' + self.apikey
data_lst = []
with requests.Session() as s:
download = s.get(CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
data_lst.append(row)
df = pd.DataFrame(columns=data_lst[0], data = data_lst[1:])
return df
# Company overview combine with the IPO date information in the listing&delisting data
def CompanyInfo(self, stock_id : str) -> DataFrame:
"""return DataFrame type stock company information
The results will show stock company information
Parameters
----------
stock_id : str
Choose the stock you want to get company information
"""
df_income_statement = self.GetListingDelistingStatus()
df_company_overview = self.GetCompanyOverview_Original(stock_id)
df_company_overview = df_company_overview.loc[:,['Symbol', 'AssetType', 'Name', 'Exchange','Country', 'Sector', 'Industry']]
df_company_IPO_date = df_income_statement.loc[df_income_statement['symbol'] == stock_id]
df_company_overview['IpoDate'] = str(df_company_IPO_date['ipoDate'].values[0])
df_company_overview['DelistingDate'] = str(df_company_IPO_date['delistingDate'].values[0])
df_company_overview.loc[(df_company_overview["DelistingDate"] == "null"), "DelistingDate"] = "1970-01-01"
df_company_overview['Status'] = str(df_company_IPO_date['status'].values[0])
return df_company_overview
# class FileOutputCSV(object):
# def __init__(self, apikey: str, StockTimeSeries : classmethod, FundamentalData : classmethod) -> None:
# self.apikey = apikey
# self.StockTimeSeries = StockTimeSeries(self.apikey)
# self.FundamentalData = FundamentalData(self.apikey)
# def CSV_Output_Original(self, stock_id : str) -> DataFrame:
# workbook = xlwt.Workbook()
# workbook.add_sheet('Daily Price')
# workbook.add_sheet('Weekly Price')
# workbook.add_sheet('Monthly Price')
# workbook.add_sheet('Intraday Price')
# workbook.add_sheet('Income Statement Annual')
# workbook.add_sheet('Income Statement Quarterly')
# workbook.add_sheet('Company Overview')
# workbook.add_sheet('Search Endpoint Results')
# workbook.add_sheet('US ListingDelisting Status')
# workbook.add_sheet('IPO Calender')
# workbook.save('Original_Data.xlsx')
# df = self.GetIncomeStatement_Original(stock_id)
# writer = pd.ExcelWriter('Original_Data.xlsx', engine='xlsxwriter')
# self.GetDailyStockPrice_Original(stock_id).to_excel(writer, sheet_name='Daily Price')
# self.GetWeeklyStockPrice(stock_id).to_excel(writer, sheet_name='Weekly Price')
# self.GetMonthlyStockPrice(stock_id).to_excel(writer, sheet_name='Monthly Price')
# self.GetIntradayStockPrice(stock_id).to_excel(writer, sheet_name='Intraday Price')
# df[0].to_excel(writer, sheet_name='Income Statement Annual')
# df[1].to_excel(writer, sheet_name='Income Statement Quarterly')
# self.GetCompanyOverview_Original(stock_id).to_excel(writer, sheet_name='Company Overview')
# self.GetSearchEndpoint(stock_id).to_excel(writer, sheet_name='Search Endpoint Results')
# self.GetListingDelistingStatus().to_excel(writer, sheet_name='US ListingDelisting Status')
# self.FindIPOCalender().to_excel(writer, sheet_name='IPO Calender')
# writer.save()
# # CSV file - Filter data
# def CSV_Output(self, stock_id : str) -> DataFrame:
# workbook = xlwt.Workbook()
# workbook.add_sheet('Daily Price')
# workbook.add_sheet('Weekly Price')
# workbook.add_sheet('Monthly Price')
# workbook.add_sheet('Intraday Price')
# workbook.add_sheet('Income Statement Annual Reports')
# workbook.add_sheet('Company Overview')
# workbook.add_sheet('Search Endpoint Results')
# workbook.add_sheet('US ListingDelisting Status')
# workbook.add_sheet('IPO Calender')
# workbook.save('Filter_Data.xlsx')
# writer = pd.ExcelWriter('Filter_Data.xlsx', engine='xlsxwriter')
# self.GetDailyStockPrice(stock_id).to_excel(writer, sheet_name='Daily Price')
# self.GetWeeklyStockPrice(stock_id).to_excel(writer, sheet_name='Weekly Price')
# self.GetMonthlyStockPrice(stock_id).to_excel(writer, sheet_name='Monthly Price')
# self.GetIntradayStockPrice(stock_id).to_excel(writer, sheet_name='Intraday Price')
# self.GetIncomeStatement(stock_id).to_excel(writer, sheet_name='Income Statement Annual Reports')
# self.GetCompanyOverview(stock_id).to_excel(writer, sheet_name='Company Overview')
# self.GetSearchEndpoint(stock_id).to_excel(writer, sheet_name='Search Endpoint Results')
# self.GetListingDelistingStatus().to_excel(writer, sheet_name='US ListingDelisting Status')
# self.FindIPOCalender().to_excel(writer, sheet_name='IPO Calender')
# writer.save()
|
Alpha-Vantage-API-Data
|
/Alpha_Vantage_API_Data-1.0.3.tar.gz/Alpha_Vantage_API_Data-1.0.3/src/Alpha_Vantage_API_Data/API_Data_Retrieves.py
|
API_Data_Retrieves.py
|
# AlphaAffixedNumericType (aant)
A Python data type that supports arithmetic on alphanumeric strings.
## Types of arithmetic supported
- Addition
`aant + integer`
\* NOTE: the integer must be on the right-hand side of the addition operator
- Subtraction
`aant - [integer|aant]`
## How to Use
```python
from AlphaAffixedNumericType import AlphaAffixedNumericType
aant = AlphaAffixedNumericType('A123')
print(aant + 1) # prints 'A124'
print(aant + 1000) # prints 'A1123'
aant += 10
print(aant.get_value()) # prints 'A133'
aant2 = AlphaAffixedNumericType('A123B')
aant3 = AlphaAffixedNumericType('A124B')
print(aant2 - aant3) # prints -1
print(aant2 - 200) # raises 'NumericArithmeticException' - Numeric part of aant2 (123) is less than 200
aant4 = AlphaAffixedNumericType('A0001B')
print(aant4 + 1000) # prints 'A1001B'
```
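The README documents the behaviour but not the mechanics. As a rough mental model only (this is not the package's actual implementation, and the class and exception names below are illustrative stand-ins), the type can be viewed as an alphabetic prefix and suffix wrapped around a zero-padded numeric core:
```python
import re

class SimpleAlphaNumeric:
    """Illustrative sketch; the real AlphaAffixedNumericType and its
    NumericArithmeticException may be implemented differently."""

    def __init__(self, value: str):
        match = re.search(r'\d+', value)  # first run of digits is the numeric core
        if match is None:
            raise ValueError('no numeric part in %r' % value)
        self._prefix = value[:match.start()]
        self._digits = match.group()
        self._suffix = value[match.end():]

    def get_value(self) -> str:
        return self._prefix + self._digits + self._suffix

    __str__ = get_value

    def __add__(self, other: int) -> 'SimpleAlphaNumeric':
        # keep the original zero-padded width, e.g. 'A0001B' + 1000 -> 'A1001B'
        digits = str(int(self._digits) + other).zfill(len(self._digits))
        return SimpleAlphaNumeric(self._prefix + digits + self._suffix)

    def __sub__(self, other):
        if isinstance(other, SimpleAlphaNumeric):
            return int(self._digits) - int(other._digits)  # aant - aant gives an int
        if int(self._digits) < other:
            raise ArithmeticError('numeric part is smaller than the subtrahend')
        digits = str(int(self._digits) - other).zfill(len(self._digits))
        return SimpleAlphaNumeric(self._prefix + digits + self._suffix)

print(SimpleAlphaNumeric('A123') + 1)       # A124
print(SimpleAlphaNumeric('A0001B') + 1000)  # A1001B
```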
|
AlphaAffixedNumericType
|
/AlphaAffixedNumericType-0.1.0.tar.gz/AlphaAffixedNumericType-0.1.0/README.md
|
README.md
|
import os, json, pdb, itertools
import pandas as pd
from ultron.strategy.deformer import FusionLoad
from ultron.kdutils.file import load_pickle
from jdw.mfc.entropy.deformer.fusionx import Futures
from alphaedge.plugins.quantum.base import Base
class Predictor(Base):
def __init__(self, directory, policy_id, is_groups=1):
super(Predictor, self).__init__(directory=directory,
policy_id=policy_id,
is_groups=is_groups)
def predict(self, model_desc, total_data, returns_data):
alpha_res = []
desc_dir = os.path.join(self.category_directory, "desc")
model_dir = os.path.join(self.category_directory, "model")
model_desc = model_desc if isinstance(model_desc,
list) else [model_desc]
model_list = []
for m in model_desc:
filename = os.path.join(desc_dir, "{0}.h5".format(m))
desc = load_pickle(filename)
model = FusionLoad(desc)
model_list.append(model)
columns = [model.formulas.dependency for model in model_list]
columns = list(set(itertools.chain.from_iterable(columns)))
total_data = self.normal(total_data=total_data, columns=columns)
for model in model_list:
eng = Futures(batch=model.batch,
freq=model.freq,
horizon=model.horizon,
id=model.id,
is_full=True,
directory=model_dir)
factors = eng.create_data(total_data=total_data,
returns=returns_data)
alpha_res.append(factors)
return pd.concat(alpha_res, axis=1)
def calculate(self, total_data, returns_data=None):
policy_file = os.path.join(self.directory, "policy.json")
with open(policy_file, 'r') as json_file:
policy_data = json.load(json_file)
model_desc = policy_data['groups'] if self.is_groups else policy_data[
'main']
return self.predict(model_desc=model_desc,
total_data=total_data,
returns_data=returns_data)
|
AlphaEdge
|
/AlphaEdge-0.0.4.tar.gz/AlphaEdge-0.0.4/alphaedge/plugins/quantum/predictor.py
|
predictor.py
|
import os, json, copy, math
import pandas as pd
import numpy as np
from ultron.tradingday import *
from ultron.strategy.experimental.multiple_factor import MultipleFactor
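# Scales each trade date's weights so that the estimated portfolio volatility (built from the
# return-correlation matrix and per-asset volatility) matches the configured target volatility.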
class Constraint(object):
def __init__(self, directory, policy_id, volatility_name):
self.policy_id = policy_id
self.directory = os.path.join(directory, self.policy_id)
self.volatility_name = volatility_name
def load_configure(self):
policy_file = os.path.join(self.directory, "policy.json")
with open(policy_file, 'r') as json_file:
policy_data = json.load(json_file)
return policy_data['constraint']
def calculate(self, weighted_data, volatility_data, prev_returns):
configure = self.load_configure()
volatility_mc = MultipleFactor(signal_data=None,
volatility_data=volatility_data,
returns_data=prev_returns)
volatility_data = volatility_mc._winsorize_volatility(
name=self.volatility_name, volatility_data=volatility_data)
volatility_data['trade_date'] = pd.to_datetime(
volatility_data['trade_date'])
weighted_groups = weighted_data.groupby('trade_date')
res = []
for ref_date, this_data in weighted_groups:
begin_date = advanceDateByCalendar(
'china.sse', ref_date, '-{0}b'.format(configure['window']))
end_date = advanceDateByCalendar('china.sse', ref_date, '-0b')
signal = weighted_data.set_index('trade_date').loc[end_date]
volatility = volatility_data.set_index('trade_date').loc[end_date]
returns = prev_returns.set_index(
'trade_date').loc[begin_date:end_date].reset_index()
codes = set(signal.code.unique().tolist()) & set(
returns.code.unique().tolist()) & set(
volatility.code.unique().tolist())
returns = returns.set_index('code').loc[codes].reset_index()
signal = signal.set_index('code').loc[codes].reset_index()
w = copy.deepcopy(this_data)
corr_dt = volatility_mc._returns_corr(returns).fillna(0)
### reindex w and volatility to the correlation-matrix order
w = w.set_index('code').reindex(corr_dt.index).reset_index()
volatility = volatility.set_index('code').reindex(
corr_dt.index).reset_index()
data = w.merge(corr_dt, on=['code']).merge(volatility, on=['code'])
cols = [
col for col in data.columns if col not in [
'code', 'signal', 'trade_date', 'weight',
self.volatility_name
]
]
s = data['weight'] * data[self.volatility_name]
v = data[self.volatility_name]
n = np.dot(s.T, data[cols])
if n.shape[0] != s.shape[0]:
print(n.shape[0], s.shape[0])
else:
m = np.dot(n, s)
if m == 0:
continue
op = math.sqrt(m)
weighted_dt = copy.deepcopy(this_data)
weighted_dt['weight'] = ((configure['volatility'] / op) *
this_data['weight'])
res.append(weighted_dt.set_index(['trade_date', 'code']))
target_pos = pd.concat(res, axis=0)
return target_pos
|
AlphaEdge
|
/AlphaEdge-0.0.4.tar.gz/AlphaEdge-0.0.4/alphaedge/plugins/quantum/constraint.py
|
constraint.py
|
import os, json, pdb
import pandas as pd
from ultron.tradingday import *
from ultron.strategy.optimize import Optimize
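# Builds the nested parameter dict consumed by ultron's Optimize.rebalance_positions:
# industry / risk-style lists plus bound settings for benchmark, total, industry and
# risk-style exposures, all overridable through keyword arguments.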
def create_params(**kwargs):
### list settings (industries and risk styles)
industry_effective = [] if 'industry_effective' not in kwargs else kwargs[
'industry_effective']
industry_invalid = [] if 'industry_invalid' not in kwargs else kwargs[
'industry_invalid']
riskstyle = [] if 'riskstyle' not in kwargs else kwargs['riskstyle']
### general parameter settings
weights_bandwidth = 0.1 if 'weights_bandwidth' not in kwargs else kwargs[
'weights_bandwidth']
method = 'fmv' if 'method' not in kwargs else kwargs['method']
turn_over_target = 1.0 if 'turn_over_target' not in kwargs else kwargs[
'turn_over_target']
target_vol = 0.1 if 'target_vol' not in kwargs else kwargs['target_vol']
lbound = 0. if 'lbound' not in kwargs else kwargs['lbound']
ubound = 0.04 if 'ubound' not in kwargs else kwargs['ubound']
is_benchmark = 0 if 'is_benchmark' not in kwargs else kwargs['is_benchmark']
### benchmark bound settings
benchmark_boundary = 'relative' if 'benchmark_boundary' not in kwargs else kwargs[
'benchmark_boundary']
benchmark_lower = 1.001 if 'benchmark_lower' not in kwargs else kwargs[
'benchmark_lower']
benchmark_upper = 0.8 if 'benchmark_upper' not in kwargs else kwargs[
'benchmark_upper']
#### total bound settings
total_boundary = 'relative' if 'total_boundary' not in kwargs else kwargs[
'total_boundary']
total_lower = 0.001 if 'total_lower' not in kwargs else kwargs[
'total_lower']
total_upper = 0.01 if 'total_upper' not in kwargs else kwargs['total_upper']
#### effective-industry (upper-bound) settings
effective_industry_boundary = 'absolute' if 'effective_industry_boundary' not in kwargs else kwargs[
'effective_industry_boundary']
effective_industry_lower = 0.0 if 'effective_industry_lower' not in kwargs else kwargs[
'effective_industry_lower']
effective_industry_upper = 0.20 if 'effective_industry_upper' not in kwargs else kwargs[
'effective_industry_upper']
#### invalid-industry (lower-bound) settings
invalid_industry_boundary = 'absolute' if 'invalid_industry_boundary' not in kwargs else kwargs[
'invalid_industry_boundary']
invalid_industry_lower = 0.0 if 'invalid_industry_lower' not in kwargs else kwargs[
'invalid_industry_lower']
invalid_industry_upper = 0.20 if 'invalid_industry_upper' not in kwargs else kwargs[
'invalid_industry_upper']
riskstyle_boundary = 'absolute' if 'riskstyle_boundary' not in kwargs else kwargs[
'riskstyle_boundary']
riskstyle_lower = 0.0 if 'riskstyle_lower' not in kwargs else kwargs[
'riskstyle_lower']
riskstyle_upper = 0.20 if 'riskstyle_upper' not in kwargs else kwargs[
'riskstyle_upper']
neutralized_styles = None if 'neutralized_styles' not in kwargs else kwargs[
'neutralized_styles']
other_boundary = 'absolute' if 'other_boundary' not in kwargs else kwargs[
'other_boundary']
params = {}
params['industry'] = {}
params['riskstyle'] = {}
### list settings
params['industry']['effective'] = industry_effective
params['industry']['invalid'] = industry_invalid
params['riskstyle'] = riskstyle
### general parameter settings
params['setting_params'] = {}
params['setting_params']['weights_bandwidth'] = weights_bandwidth
params['setting_params']['method'] = method
params['setting_params']['turn_over_target'] = turn_over_target
params['setting_params']['target_vol'] = target_vol
params['setting_params']['lbound'] = lbound
params['setting_params']['ubound'] = ubound
params['setting_params']['is_benchmark'] = is_benchmark
params['setting_params']['benchmark'] = {}
params['setting_params']['total'] = {}
params['setting_params']['other'] = {}
###
params['setting_params']['other']['boundary'] = other_boundary
params['setting_params']['other']['lower'] = 0.0
params['setting_params']['other']['upper'] = 0.0
# benchmark bound settings
params['setting_params']['benchmark']['boundary'] = benchmark_boundary
params['setting_params']['benchmark']['lower'] = benchmark_lower
params['setting_params']['benchmark']['upper'] = benchmark_upper
# Total bound settings (condition 6)
params['setting_params']['total']['boundary'] = total_boundary
params['setting_params']['total']['lower'] = total_lower
params['setting_params']['total']['upper'] = total_upper
### Industry timing is taken into account here
params['setting_params']['effective_industry'] = {}
params['setting_params']['invalid_industry'] = {}
#### effective_industry: upper-bound industry settings
params['setting_params']['effective_industry'][
'boundary'] = effective_industry_boundary
params['setting_params']['effective_industry'][
'lower'] = effective_industry_lower
params['setting_params']['effective_industry'][
'upper'] = effective_industry_upper
#### invalid_industry: lower-bound industry settings
params['setting_params']['invalid_industry'][
'boundary'] = invalid_industry_boundary
params['setting_params']['invalid_industry'][
'lower'] = invalid_industry_lower
params['setting_params']['invalid_industry'][
'upper'] = invalid_industry_upper
### riskstyle: risk-style settings
params['setting_params']['riskstyle'] = {}
params['setting_params']['riskstyle']['boundary'] = riskstyle_boundary
params['setting_params']['riskstyle']['lower'] = riskstyle_lower
params['setting_params']['riskstyle']['upper'] = riskstyle_upper
params['setting_params']['neutralized_styles'] = neutralized_styles
return params
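# Illustrative example of create_params (the override values below are hypothetical):
#   params = create_params(method='risk_neutral', turn_over_target=0.5,
#                          industry_effective=['801010'], riskstyle=['SIZE'])
#   params['setting_params']['turn_over_target']  # -> 0.5
#   params['industry']['effective']               # -> ['801010']
# Any keyword that is not supplied falls back to the defaults defined above.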
class Optimizer(object):
def __init__(self, directory, policy_id):
self.policy_id = policy_id
self.directory = os.path.join(directory, self.policy_id)
self.method = None
def load_configure(self):
policy_file = os.path.join(self.directory, "policy.json")
with open(policy_file, 'r') as json_file:
policy_data = json.load(json_file)
self.method = policy_data['optimizer']['method'].split('_')[0]
return create_params(**policy_data['optimizer'])
def load_features(self):
policy_file = os.path.join(self.directory, "policy.json")
with open(policy_file, 'r') as json_file:
policy_data = json.load(json_file)
return policy_data['main']
def tailor_data(self, total_data):
industry_dummy = pd.get_dummies(
total_data.set_index(['trade_date',
'code'])['industry_code']).reset_index()
total_data = total_data.merge(industry_dummy,
on=['trade_date', 'code'])
return total_data
def calculate(self, total_data, risk_model):
configure = self.load_configure()
total_data = self.tailor_data(total_data)
begin_date = pd.to_datetime(
total_data['trade_date']).dt.strftime('%Y-%m-%d').min()
end_date = pd.to_datetime(
total_data['trade_date']).dt.strftime('%Y-%m-%d').max()
optimize = Optimize(alpha_model=None,
category=self.method,
features=[self.load_features()],
begin_date=begin_date,
end_date=end_date,
risk_model=risk_model,
index_returns=None,
total_data=total_data)
return optimize.rebalance_positions(configure).reset_index(drop=True)
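# Illustrative usage sketch (directory, policy id and data below are hypothetical;
# policy.json must provide the 'optimizer' section consumed by create_params() and
# a 'main' feature name):
#   optimizer = Optimizer(directory='/data/policies', policy_id='policy_001')
#   positions = optimizer.calculate(total_data=factor_df, risk_model=risk_model)
# total_data is expected to carry at least 'trade_date', 'code' and 'industry_code'
# columns, since tailor_data() one-hot encodes industry_code before rebalancing.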
|
AlphaEdge
|
/AlphaEdge-0.0.4.tar.gz/AlphaEdge-0.0.4/alphaedge/plugins/quantum/optimizer.py
|
optimizer.py
|
from typing import List, Union
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import os
from datetime import datetime
import time
import logging
logging.basicConfig(level=logging.INFO)
class AlphaFetcher:
"""
A class to fetch and download protein metadata and files from the AlphaFold Protein Structure Database using
Uniprot access codes.
Attributes:
uniprot_access_list (List[str]): A list storing the Uniprot access codes to be fetched.
failed_ids (List[str]): A list storing any Uniprot access codes that failed to be fetched.
metadata_dict (dict): A dictionary storing fetched metadata against each Uniprot access code.
base_savedir (str): The base directory where fetched files will be saved.
"""
def __init__(self):
"""
Initializes the AlphaFetcher class with default values.
"""
self.uniprot_access_list = []
self.failed_ids = []
self.metadata_dict = {}
self.base_savedir = os.path.join(os.getcwd(), f'alphafetcher_results_'
f'{datetime.now().strftime("%Y%m%d_%H%M%S")}')
def add_proteins(self, proteins: Union[str, List[str]]) -> None:
"""
Adds the provided Uniprot access codes to the list for fetching.
Args:
proteins (Union[str, List[str]]): A single Uniprot access code or a list of codes.
Raises:
ValueError: If the provided proteins parameter is neither a string nor a list of strings.
"""
if isinstance(proteins, str):
self.uniprot_access_list.append(proteins)
elif isinstance(proteins, list):
self.uniprot_access_list.extend(proteins) # Using extend() method to add multiple items from a list.
else:
raise ValueError("Expected a string or a list of strings, but got {}".format(type(proteins)))
def _fetch_single_metadata(self, uniprot_access: str, alphafold_database_base: str, pbar=None):
"""
Fetches the metadata for a single Uniprot access code.
Args:
uniprot_access (str): The Uniprot access code to fetch.
alphafold_database_base (str): The base URL for the Alphafold API.
pbar (tqdm, optional): A tqdm progress bar. Defaults to None.
"""
response = requests.get(f"{alphafold_database_base}{uniprot_access}")
if response.status_code == 200:
alphafold_data = response.json()[0]
self.metadata_dict[uniprot_access] = alphafold_data
else:
self.failed_ids.append(uniprot_access)
if pbar:
pbar.update(1)
def fetch_metadata(self, multithread: bool = False, workers: int = 10):
"""
Fetches metadata for all the Uniprot access codes added to the class.
Args:
multithread (bool, optional): If true, uses multithreading for faster fetching. Defaults to False.
workers (int, optional): Number of threads to use if multithreading. If -1, uses all available CPUs.
Defaults to 10.
"""
alphafold_api_base = "https://alphafold.ebi.ac.uk/api/prediction/"
# Use all available CPUs if workers is set to -1
if workers == -1:
workers = os.cpu_count() or 1 # Default to 1 if os.cpu_count() returns None
if len(self.uniprot_access_list) == 0:
print('Please provide a list of Uniprot access codes with the method add_proteins()')
return
with tqdm(total=len(self.uniprot_access_list), desc="Fetching Metadata") as pbar:
if multithread:
with ThreadPoolExecutor(max_workers=workers) as executor:
futures = [executor.submit(self._fetch_single_metadata, uniprot_access, alphafold_api_base,
pbar) for uniprot_access in self.uniprot_access_list]
# Ensure all futures have completed
for _ in as_completed(futures):
pass
else:
for uniprot_access in self.uniprot_access_list:
self._fetch_single_metadata(uniprot_access, alphafold_api_base, pbar)
if len(self.failed_ids) > 0:
print(f'Uniprot accessions not found in database: {", ".join(self.failed_ids)}')
def _download_single_protein(self, uniprot_access: str, pdb: bool = False, cif: bool = False, bcif: bool = False,
pae_image: bool = False, pae_data: bool = False, pbar=None):
"""
Downloads files for a single Uniprot access code.
Args:
uniprot_access (str): The Uniprot access code to fetch.
pdb (bool, optional): If true, downloads the pdb file. Defaults to False.
cif (bool, optional): If true, downloads the cif file. Defaults to False.
bcif (bool, optional): If true, downloads the bcif file. Defaults to False.
pae_image (bool, optional): If true, downloads the PAE image file. Defaults to False.
pae_data (bool, optional): If true, downloads the PAE data file. Defaults to False.
pbar (tqdm, optional): A tqdm progress bar. Defaults to None.
"""
links_to_download = []
metadata_dict = self.metadata_dict[uniprot_access]
if pdb:
pdb_savedir = os.path.join(self.base_savedir, 'pdb_files')
extension = 'pdb'
links_to_download.append([metadata_dict['pdbUrl'], pdb_savedir, extension])
if cif:
cif_savedir = os.path.join(self.base_savedir, 'cif_files')
extension = 'cif'
links_to_download.append([metadata_dict['cifUrl'], cif_savedir, extension])
if bcif:
bcif_savedir = os.path.join(self.base_savedir, 'bcif_files')
extension = 'bcif'
links_to_download.append([metadata_dict['bcifUrl'], bcif_savedir, extension])
if pae_image:
pae_image_savedir = os.path.join(self.base_savedir, 'pae_image_files')
extension = 'png'
links_to_download.append([metadata_dict['paeImageUrl'], pae_image_savedir, extension])
if pae_data:
pae_data_savedir = os.path.join(self.base_savedir, 'pae_data_files')
extension = 'json'
links_to_download.append([metadata_dict['paeDocUrl'], pae_data_savedir, extension])
if len(links_to_download) == 0:
print('Please select a type of data to download')
return
for data_type in links_to_download:
data_type_url = data_type[0]
data_type_savedir = data_type[1]
file_extension = data_type[2]
if not os.path.isdir(data_type_savedir):
os.makedirs(data_type_savedir, exist_ok=True)
response = requests.get(data_type_url)
if response.status_code == 200:
save_path = os.path.join(data_type_savedir, f"{uniprot_access}.{file_extension}")
with open(save_path, 'wb') as f:
f.write(response.content)
else:
print(f"Error with protein {uniprot_access}")
return
if pbar:
pbar.update(1)
def download_all_files(self, multithread: bool = False, workers: int = 10, pdb: bool = False, cif: bool = False,
bcif: bool = False, pae_image: bool = False, pae_data: bool = False):
"""
Downloads files for all the Uniprot access codes added to the class.
Args:
multithread (bool, optional): If true, uses multithreading for faster downloading. Defaults to False.
workers (int, optional): Number of threads to use if multithreading. If -1, uses all available CPUs.
Defaults to 10.
pdb (bool, optional): If true, downloads the pdb file. Defaults to False.
cif (bool, optional): If true, downloads the cif file. Defaults to False.
bcif (bool, optional): If true, downloads the bcif file. Defaults to False.
pae_image (bool, optional): If true, downloads the PAE image file. Defaults to False.
pae_data (bool, optional): If true, downloads the PAE data file. Defaults to False.
"""
# Use all available CPUs if workers is set to -1
if workers == -1:
workers = os.cpu_count() or 1 # Default to 1 if os.cpu_count() returns None
if len(self.uniprot_access_list) == 0:
print('Please provide a list of Uniprot access codes with the method add_proteins()')
return
# This means that fetch_metadata has not been called. If it was called but had invalid codes, self.failed_ids
# would not be empty
if len(self.metadata_dict) == 0 and len(self.failed_ids) == 0:
self.fetch_metadata(multithread=multithread, workers=workers)
# This means that after fetching the metadata, there were no valid uniprot access codes
if len(self.metadata_dict) == 0 and len(self.failed_ids) > 0:
print('No valid Uniprot access codes provided')
return
valid_uniprots = self.metadata_dict.keys()
with tqdm(total=len(valid_uniprots), desc="Fetching files") as pbar:
if multithread:
with ThreadPoolExecutor(max_workers=workers) as executor:
futures = {executor.submit(self._download_single_protein, uniprot_access, pdb, cif, bcif, pae_image,
pae_data, pbar): uniprot_access for uniprot_access in valid_uniprots}
# Ensure all futures have completed and handle exceptions
for future in as_completed(futures):
uniprot_access = futures.get(future)
try:
future.result()
except Exception as e:
logging.error(f"Error in thread for {uniprot_access}: {e}")
else:
for uniprot_access in valid_uniprots:
self._download_single_protein(uniprot_access, pdb, cif, bcif, pae_image, pae_data, pbar)
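# Usage sketch (not part of the library): the intended call order is add_proteins(),
# then fetch_metadata(), then download_all_files(). The accessions below are just
# examples of valid UniProt entries; running this performs real network requests.
#   fetcher = AlphaFetcher()
#   fetcher.add_proteins(["P69905", "P68871"])
#   fetcher.fetch_metadata(multithread=True, workers=4)
#   fetcher.download_all_files(multithread=True, workers=4, cif=True, pae_data=True)
#   print(f"Files saved under {fetcher.base_savedir}")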
|
AlphaFetcher
|
/AlphaFetcher-0.1.0b2-py3-none-any.whl/alphafetcher/wrapper/wrapper_utils.py
|
wrapper_utils.py
|
import time
from Constants import MISSINGGENOTYPECODE
import numpy as np
# import scipy as sp
import typing
log1 = 0
loge = np.log(np.exp(-5))
log5 = np.log(0.5)
logd = loge
# Global variables defined for heuristic geneprob
heuristicTrace = np.empty((3,3))
heuristicTrace[0,:] = (log1,log5,loge)
heuristicTrace[1,:] = (loge,log5,log1)
heuristicTrace[2,:] = (log1,log5,loge)
heuristicTraceNoSeg = np.empty((3,4),dtype=np.float64)
heuristicTraceNoSeg[0, :] = (log1, log5, log5, loge)
heuristicTraceNoSeg[1, :] = (loge, log5, log5, log1)
heuristicTraceNoSeg[2, :] = (loge, log5, log5, log1)
heuristicTraceSeg = np.empty((2,2,4),dtype=np.float64)
heuristicTraceSeg[0,0,:] = (log1, log1, logd, logd)
heuristicTraceSeg[1,0,:] = (logd, logd, log1, log1)
heuristicTraceSeg[0, 1 ,:]= (log1, logd, log1, logd)
heuristicTraceSeg[1,1,:] = (logd, log1, logd, log1)
DISTANCETHRESHOLD = 15
def heuristicGenerob(ped):
for ind in ped:
for (i,g) in enumerate(ind.genotype):
genosProbs = np.zeros(3,dtype=np.float64)
if (g != MISSINGGENOTYPECODE): continue
for off in ind.offsprings:
offG = off.genotype[i]
if offG == MISSINGGENOTYPECODE: continue
if (off.sire == ind):
sire = True
e = 0
else:
sire = False
e = 1
if (offG == 1):
p = off.haplotypes[e][i]
if (p != MISSINGGENOTYPECODE):
genosProbs[:]= genosProbs[:] + heuristicTrace[:,p]
else:
if (sire):
gOtherParent = off.dam.genotype[i]
else:
gOtherParent = off.sire.genotype[i]
if (gOtherParent == 0 or gOtherParent == 2):
p = 1 - gOtherParent // 2
genosProbs[:] = genosProbs[:] + heuristicTrace[:,p]
else:
genosProbs[:] = genosProbs[:] + heuristicTrace[:,offG]
indexes = np.argpartition(genosProbs, -2)[-2:] #get two largest indexes as a list
largest = genosProbs[indexes[1]]
secondLargest = genosProbs[indexes[0]]
pos = indexes[1]
if (largest - secondLargest > DISTANCETHRESHOLD):
ind.genotype[i] = pos
def heuristicMLP(ped):
peelDown(ped)
updateSeg(ped)
peelUp(ped)
def peelDown(ped):
parentPhase = np.zeros((2,2), dtype=np.int8)
parentGeno = np.zeros((2), dtype=np.int8)
for ind in ped:
ind.setSegToMissing()
if ind.founder: continue
for i,g in enumerate(ind.genotype):
parentPhase[0,0] = ind.sire.haplotypes[0][i]
parentPhase[0,1] = ind.sire.haplotypes[1][i]
parentPhase[1,0] = ind.dam.haplotypes[0][i]
parentPhase[1,1] = ind.dam.haplotypes[1][i]
parentGeno[0] = ind.sire.genotype[i]
parentGeno[1] = ind.dam.genotype[i]
for h in range(0,2):
if parentGeno[h] == 2:
ind.haplotypes[h][i] = 1
elif parentGeno[h] == 0:
ind.haplotypes[h][i] = 0
elif parentGeno[h] == 1:
if (parentPhase[h,0] != MISSINGGENOTYPECODE and parentPhase[h,1] != MISSINGGENOTYPECODE):
seg = ind.seg[i,h]
if seg != MISSINGGENOTYPECODE:
ind.haplotypes[h][i] = parentPhase[h,seg]
ind.makeIndividualGenotypeFromPhase()
ind.makeIndividualPhaseCompliment()
def peelUp(ped):
for ind in ped.GenotypedIndividuals:
for i, g in enumerate(ind.genotype):
phaseProbs = np.zeros(4, dtype=np.float64)  # four phase configurations: (0,0),(0,1),(1,0),(1,1)
for off in ind.offsprings:
offG = off.genotype[i]
if offG != MISSINGGENOTYPECODE: continue
if (off.sire == ind):
e = 0
sire = True
else:
e = 1
sire = False
childSeg= ind.seg[i,e]
if (childSeg != MISSINGGENOTYPECODE):
childPhase = ind.haplotypes[e][i]
if (childPhase != MISSINGGENOTYPECODE):
phaseProbs[:]= phaseProbs[:] + heuristicTraceSeg[childPhase,childSeg, :]
else:
if g == 1:
p = off.haplotypes[e][i]
if (p!= MISSINGGENOTYPECODE):
phaseProbs[:] = phaseProbs[:]+ heuristicTraceNoSeg[g,:]
else:
if (sire):
g = off.dam.genotype[i]
else:
g = off.sire.genotype[i]
if (g == MISSINGGENOTYPECODE): continue
phaseProbs[:] = phaseProbs[:] + heuristicTraceNoSeg[g,:]
indexes = np.argpartition(phaseProbs, -2)[-2:] #get two largest indexes as a list
largest = phaseProbs[indexes[1]]
secondLargest = phaseProbs[indexes[0]]
pos = indexes[1]
secondPos = indexes[0]
if (largest - secondLargest) > 15-1:
if pos == 0:
ind.genotype[i] = 0
ind.phase[0][i] = 0
ind.phase[1][i] = 0
elif pos == 1:
ind.genotype[i] = 1
ind.phase[0][i] = 0
ind.phase[1][i] = 1
elif pos == 2:
ind.genotype[i] = 1
ind.phase[0][i] = 1
ind.phase[1][i] = 0
elif pos == 3:
ind.genotype[i] = 2
ind.phase[0][i] = 1
ind.phase[1][i] = 1
else:
genosProbs = (phaseProbs[0], np.max(phaseProbs[1:]))
indexes = np.argpartition(genosProbs, -2)[-2:] #get two largest indexes as a list
largest = genosProbs[indexes[1]]
secondLargest = genosProbs[indexes[0]]
pos = indexes[1]
if (largest - secondLargest) > DISTANCETHRESHOLD:
ind.genotype[i] = pos
def updateSeg(ped):
threshold = 3
for ind in ped:
if ind.founder: continue
for i,g in enumerate(ind.genotype):
genos = (ind.sire.genotype[i], ind.dam.genotype[i])
if (np.mod(genos[0],2) == 0 and np.mod(genos[1],2) == 0): continue
if (ind.haplotypes[0][i] != MISSINGGENOTYPECODE and ind.haplotypes[1][i] != MISSINGGENOTYPECODE):
parents = ind.getParentArray()
for e in range(0,2):
parent = parents[e]
if (genos[e] == 1):
phase = (parent.haplotypes[0][i],parent.haplotypes[1][i])
if not all(p == 9 for p in phase):
if (ind.haplotypes[e][i] == phase[0]):
ind.seg[i,e] = 1 #TODO originally 1 and 2 _TEST! MIGHT NEED TO BE 0, 1
elif ind.haplotypes[e][i] == phase[1]:
ind.seg[i,e] = 2
for i in range(0,2):
startLoc = 0
lastLoc = 0
currentSeg = -1
numConsistent = 0
for h in range(0, ind.genotype.length):
if ind.seg[h,i] != MISSINGGENOTYPECODE:
lastLoc = h
numConsistent += 1
else:
if (numConsistent > threshold):
ind.seg[startLoc:lastLoc,i] = currentSeg
else:
ind.seg[startLoc:lastLoc,i] = MISSINGGENOTYPECODE
startLoc = h
lastLoc = h
currentSeg = ind.seg[h,i]
numConsistent = 1
lastLoc = ind.genotype.length
if (numConsistent > threshold):
ind.seg[startLoc:lastLoc,i] = currentSeg
else:
ind.seg[startLoc:lastLoc,i] = MISSINGGENOTYPECODE
# ped = Pedigree(file='recoded-pedigree.txt')
# print("READ IN PED")
# start = time.time()
# ped.addGenotypesFromFile('genos.txt',initAll=True)
# end = time.time()
# print("time" + str(end - start))
# # ped.addGenotypesFromFile('genoTest.txt',initAll=True)
# print("READ IN geno")
# start = time.time()
# # heuristicMLP(ped)
# heuristicGenerob(ped)
# end = time.time()
# print("time" + str(end - start))
|
AlphaHousePython
|
/alphahousepython-1.0.6-cp36-cp36m-macosx_10_6_x86_64.whl/alphahousepython/src/HeuristicGeneProb.py
|
HeuristicGeneProb.py
|
from typing import Union, List
import numpy as np
from bitarray import bitarray
# from Genotype import Genotype
# import Genotype
import Genotype as Genotype
import Utils as Utils
use_numba = True
try:
import numba as nb
from numba import jit, int8, int32, boolean, jitclass
except ImportError:
use_numba = False
from _faker import jit, int8, int32, boolean, jitclass
from Constants import MEMORYSAVE, ERRORPHASECODE, MISSINGPHASECODE
class EmptyHaploypeException(Exception):
def __init__(self):
Exception.__init__(self, "Empty Haplotype given")
def checkValid(val):
if (val == MISSINGPHASECODE or val == 0 or val == 1):
return True
return False
if (use_numba):
spec = [
('array', int8[:]), # a simple scalar field
('phase', int8[:]), # an array field
('missing', int8[:]),
('lock', boolean[:]),
('start', int32),
('weight', int32),
('boolean', boolean)
]
# @jitclass(spec)
class Haplotype:
"""
Phase Phase Missing !
0 0 0 !
1 1 0 !
Missing 0 1 !
Error 1 1 !
"""
def __init__(self, array=None, phase=None, missing=None, locked=None, lock=False, start=0, weight=1):
"""
Default constructor for haplotypes
Takes in array (that can be converted to ints) and assigns haplotype object
Parameters
------------
array : [int]
array of 0,1,9 of haplotype information
lock : [bool]
And an iterable boolean of items that should be locked
"""
self.array = None
if array is None:
array = []
if (len(array) > 0):
if MEMORYSAVE:
self.__initFromArray(array, lock)
else:
self.array = np.array(array, dtype=np.int8)
elif (phase is not None and missing is not None):
self.__initFromBitArrays(phase, missing, locked)
else:
self.locked = []
if MEMORYSAVE:
self.missing = []
self.phase = []
else:
self.array = []
self.start = start
self.weight = weight
self.hasLock = False # this is a boolean
def getEnd(self) -> int:
return len(self) + self.start
def __initFromBitArrays(self, phase: bitarray, missing: bitarray, locked: bitarray):
self.phase = phase
self.missing = missing
self.locked = locked
def __initFromArray(self, array, lock):
self.phase = bitarray(len(array))
self.missing = bitarray(len(array))
self.locked = bitarray(len(array))
for i in range(0, len(self)):
val = int(array[i])
if (val == 1):
self.phase[i] = True
self.missing[i] = False
elif (val == 0):
self.phase[i] = False
self.missing[i] = False
# everything is already 0
elif (val == MISSINGPHASECODE):
self.phase[i] = False
self.missing[i] = True
else: # set to erro
# TODO throw error
self.phase[i] = True
self.missing[i] = True
if (lock):
self.hasLock = True
self.locked = bitarray(len(self))
for i in range(0, len(self)):
self.locked[i] = not self.missing[i]
def __len__(self) -> int:
"""returns length of haplotype"""
if MEMORYSAVE:
return len(self.phase)
else:
return len(self.array)
def __getitem__(self, item) -> Union[int, List[int]]:
"""gets haplotype at given snp"""
if isinstance(item, slice):
return [self[ii] for ii in range(*item.indices(len(self)))]
# TODO think
# return self.getSubsetHaplotype(start=item.start,end=item.stop)
elif isinstance(item, list):
return [self[ii] for ii in item]
elif isinstance(item, int):
if item < 0 or item >= len(self):
raise IndexError("The index (%d) is out of range." % item)
else:
if MEMORYSAVE:
return self.getPhase(item)
else:
return self.array[item]
def getSubsetHaplotype(self, start: int, end: int) -> 'Haplotype':
""" Start of subset haplotype is current haplotype start + start"""
start = int(start)
end = int(end)
return Haplotype(self[start:end], start=start + self.start, weight=self.weight)
def __eq__(self, other: object) -> bool:
"""compares two haplotpyes"""
if isinstance(other, Haplotype):
if self.start != other.start:
return False
if MEMORYSAVE:
if self.phase != other.phase or self.missing != other.missing:
return False
else:
return np.array_equal(self.array, other.array)
else:
return False
return True
def contains(self, index) :
return index >= self.start and index < self.getEnd()
def toIntArray(self) -> np.ndarray:
"""Returns an integer (int8) array of the haplotype.
"""
if not MEMORYSAVE:
return self.array
ret = np.empty(len(self), np.int8)
c = 0
for hom, add in zip(self.phase, self.missing):
if (hom and add):
ret[c] = 9
elif (hom):
ret[c] = 1
elif (add):
ret[c] = 9
else:
ret[c] = 0
c += 1
return ret
# Some utility functions to make haplotype math/comparisons moderately easier. This may be a bad idea. Not using (or tested) for now.
def toZeroOneIntArray(self) -> np.ndarray:
"""Returns an integer (int8) array of the haplotype.
"""
ret = np.zeros(len(self), np.int64)
if not MEMORYSAVE:
np.place(ret, self.array == MISSINGPHASECODE, 0)
np.place(ret, self.array == 1, 1)
return ret
c = 0
for hom, add in zip(self.phase, self.missing):
if (hom and add):
ret[c] = 0
elif (hom):
ret[c] = 1
elif (add):
ret[c] = 0
else:
ret[c] = 0
c += 1
return ret
def toNonMissingArray(self) -> np.ndarray:
"""Returns an integer (int8) array of the haplotype.
"""
ret = np.zeros(len(self), np.int64)
if not MEMORYSAVE:
np.place(ret, self.array == MISSINGPHASECODE, 0)
np.place(ret, self.array != MISSINGPHASECODE, 1)
return ret
c = 0
for hom, add in zip(self.phase, self.missing):
if (hom and add):
ret[c] = 0
elif (hom):
ret[c] = 1
elif (add):
ret[c] = 0
else:
ret[c] = 1
c += 1
return ret
def __setitem__(self, key: int, value: int):
if MEMORYSAVE:
self.setPhase(key, value)
else:
self.setPhaseArray(key, value)
def __iter__(self):
for i in range(0, len(self)):
if MEMORYSAVE:
yield self.getPhase(i)
else:
yield self.getPhaseArray(i)
def getPhase(self, pos: int):
if not MEMORYSAVE:
raise NotImplementedError("ERROR - get phase should only be called in verbose memory mode")
if (self.missing[pos] and self.phase[pos]):
return ERRORPHASECODE
elif (self.missing[pos]):
return MISSINGPHASECODE
elif (self.phase[pos]):
return 1
else:
return 0
def getPhaseArray(self, pos : int):
return self.array[pos]
def append(self, value: int):
self[len(self)] = value
def setPhaseArray(self, pos, value):
if not checkValid(value):
value = MISSINGPHASECODE
if (pos < len(self)):
self.array[pos] = value
if (pos == len(self)):
self.array = np.append(self.array, [value])
if (self.hasLock):
self.locked.append(Utils.bitMap[False])
def setPhase(self, pos: int, value: int):
value = int(value)
if (value == 0):
phase = False
miss = False
elif (value == 1):
phase = True
miss = False
elif (value == MISSINGPHASECODE):
phase = False
miss = True
else:
phase = True
miss = True
if (pos == len(self)):
self.phase.append(Utils.bitMap[phase])
self.missing.append(Utils.bitMap[miss])
if (self.hasLock):
self.locked.append(Utils.bitMap[False])
else:
self.phase[pos] = phase
self.missing[pos] = miss
def __str__(self):
if self.array is not None:
string = " "
string = string.join(str(x) for x in self.array)
else:
string = ""
for i in range(0, len(self)):
string += str(self.getPhase(i)) + " "
return string
def setFromOtherIfMissing(self, oh: 'Haplotype'):
'''!> @brief Sets one haplotypes from another if the first is missing
!> @detail Uses the following rules per snp:
!> 1) If haplotype 1 is phased then return that phase
!> 2) If haplotype 1 is missing but haplotype 2 is phased then
!> return that phase
!> 3) If both are unphased and both error then error
!> 4) If both are missing then missing
!> @date May 25, 2017'''
if (len(self) != len(oh)):
raise IndexError("Haplotypes are different lengths ")
if MEMORYSAVE:
self.phase = ((~self.missing & self.phase) | (self.missing & oh.phase))
self.missing = (self.missing & oh.missing)
else:
for i in range(0, len(self)):
if self[i] == MISSINGPHASECODE:
if (oh[i] == 1):
self[i] = 1
elif (oh[i] == 0):
self[i] = 0
def setFromGenotypeIfMissing(self, g: Genotype):
if (len(self) != len(g)):
raise IndexError("Genotype and Haplotype are different lengths ")
if MEMORYSAVE:
self.phase = ((~self.missing & self.phase) | (self.missing & (g.homo & g.additional)))
self.missing = ~g.homo & self.missing
else:
for i in range(0, len(self)):
if self[i] == MISSINGPHASECODE:
if (g[i] == 2):
self[i] = 1
elif (g[i] == 0):
self[i] = 0
def countMissing(self) -> int:
if MEMORYSAVE:
return (~self.phase & self.missing).count(True)
else:
return (self.array == 9).sum()
def countNotMissing(self) -> int:
return len(self) - self.countMissing()
def countNotEqualExcludeMissing(self, other):
if MEMORYSAVE:
return (((self.phase ^ other.phase) | (self.missing ^ other.missing)) & (
(~self.missing) & (~other.missing))).count(True)
else:
count = 0
for i in range(0, len(self)):
if not (self[i] == MISSINGPHASECODE or other[i] == MISSINGPHASECODE):
if (self[i] != other[i]):
count += 1
return count
def percentageMissing(self) -> float:
return self.countMissing() / len(self)
class IntersectCompare:
def __init__(self, matching, nonMatching, nonMissing, total):
self.matching = matching
self.nonMatching = nonMatching
self.nonMissing = nonMissing
self.total = total
# if (use_numba):
@jit(parallel=True)
def compareHapsOnIntersect(hap1: Haplotype, hap2: Haplotype) -> IntersectCompare:
if not MEMORYSAVE:
return compareHapsOnIntersectArray(hap1, hap2)
nonMissing = 0
matching = 0
start = max(hap1.start, hap2.start)
end = min(len(hap1) + hap1.start, len(hap2) + hap2.start)
h1 = hap1.getSubsetHaplotype(start - hap1.start, end - hap1.start)
h2 = hap2.getSubsetHaplotype(start - hap2.start, end - hap2.start)
if len(h1) == 0 or len(h2) == 0:
return IntersectCompare(0, 0, 0, end - start + 1)
nonMissing = (~h1.missing & ~h2.missing).count(True)
matching = ((~h1.missing & ~h2.missing) & ((h1.phase & h2.phase) ^ ((~h1.phase & ~h2.phase)))).count(True)
nonMatching = nonMissing - matching
return IntersectCompare(matching, nonMatching, nonMissing, end - start + 1)
def compareHapsOnIntersectArray(hap1: Haplotype, hap2: Haplotype) -> IntersectCompare:
if MEMORYSAVE:
return compareHapsOnIntersect(hap1, hap2)
nonMissing = 0
matching = 0
missing = 9
if (len(hap1.array) == 0):
hap1.array = hap1.toIntArray()
if (len(hap2.array) == 0):
hap2.array = hap2.toIntArray()
hap1Start = hap1.start
hap2Start = hap2.start
hap1Array = hap1.array
hap2Array = hap2.array
hap1Length = len(hap1)
hap2Length = len(hap2)
start = max(hap1Start, hap2Start)
end = min(hap1Length + hap1Start, hap2Length + hap2Start)
for i in range(start, end):
ind1 = i - hap1Start
ind2 = i - hap2Start
if (hap1Array[ind1] != missing and hap2Array[ind2] != missing):
nonMissing += 1
if (hap1Array[ind1] == hap2Array[ind2]):
matching += 1
nonMatching = nonMissing - matching
return IntersectCompare(matching, nonMatching, nonMissing, end - start + 1)
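# Usage sketch (illustrative only; behaviour of the bit-packed branch depends on the
# MEMORYSAVE constant). Builds two short haplotypes from 0/1/9 arrays and compares
# them on their overlapping, non-missing positions.
#   h1 = Haplotype([0, 1, 9, 1])
#   h2 = Haplotype([0, 1, 0, 0])
#   h1.toIntArray()       # array([0, 1, 9, 1]); 9 marks a missing phase
#   h1.countMissing()     # 1
#   result = compareHapsOnIntersectArray(h1, h2)
#   result.matching, result.nonMatching, result.nonMissing   # 2, 1, 3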
|
AlphaHousePython
|
/alphahousepython-1.0.6-cp36-cp36m-macosx_10_6_x86_64.whl/alphahousepython/src/Haplotype.py
|
Haplotype.py
|
import math
from hap import PyHaplotype
from ped import PyIndividual
import itertools
class HaplotypeLibraryAlteredException(Exception):
"""Exception to be thrown when there is no genotype information available"""
def __init__(self):
Exception.__init__(self, "Haplotype library altered during iteration!")
class HaplotypeLibrary(object):
def __init__(self, haplotypes = None, individuals = [], coreLengths = None) -> object:
self.partialHaplotypes = haplotypes
if haplotypes is None:
self.partialHaplotypes = []
self.fullHaplotypes = []
for ind in individuals:
self.fullHaplotypes.append(ind.haplotypes[0])
self.fullHaplotypes.append(ind.haplotypes[1])
self.currentIterator = None
self.workingOnHaplotypes =True
self.index = 0
self.coreLengths = coreLengths
if coreLengths is None:
self.coreLengths = []
self.changed = False
def __iter__(self):
self.workingOnHaplotypes = True
self.index = 0
self.changed = False
return self
def __next__(self):
if (len(self.partialHaplotypes) ==0 and len(self.fullHaplotypes) == 0): raise StopIteration
if self.changed: raise HaplotypeLibraryAlteredException
if self.currentIterator is not None :
try :
return self.currentIterator.__next__()
except StopIteration :
self.currentIterator = None
if self.currentIterator is None :
if self.workingOnHaplotypes :
if self.index < len(self.partialHaplotypes) :
self.index = self.index + 1
return self.partialHaplotypes[self.index-1]
else:
self.workingOnHaplotypes = False
self.index = 0
if not self.workingOnHaplotypes :
if self.index < len(self.fullHaplotypes) :
self.currentIterator = HaplotypeIterator(self.coreLengths,
self.fullHaplotypes[self.index]) # Need to actually do the correct thing here.
self.index = self.index + 1
else:
raise StopIteration
return self.__next__()
def __getitem__(self, item):
if isinstance(item, list):
return [self.partialHaplotypes[i] for i in item]
else:
try:
return self.fullHaplotypes[int(item)]
except ValueError:
raise TypeError("Wrong type given for getter:" + str(type(item)))
def __setitem__(self, key, value):
if (isinstance(value, PyIndividual)):
# todo check keys
self.fullHaplotypes.append(value.haplotypes[0])
self.fullHaplotypes.append(value.haplotypes[1])
elif (isinstance(value, int)):
self.coreLengths.append(value)
elif (isinstance(value, PyHaplotype)):
self.partialHaplotypes.append(value)
self.changed = True
def appendHap(self, h: PyHaplotype):
self.partialHaplotypes.append(h)
class HaplotypeIterator(object) :
def __init__(self, coreLengths, offsets, haplotype) :
self.coreLengths = coreLengths
self.maxSnp = len(haplotype)
self.haplotype = haplotype
self.offsets = offsets
def __iter__(self):
for coreLength in self.coreLengths :
for offset in self.offsets:
end = 0
index = 0
while end < self.maxSnp :
end, subHap = self.getHap(coreLength, offset, index)
yield ((coreLength, offset, index), subHap)
index = index + 1
def getHap(self, coreLength, offset, index):
coreLength = min(coreLength, self.maxSnp)
offset = math.floor(offset*coreLength)
start = max( index*coreLength - offset, 0)
end = min( (index + 1)*coreLength - offset, self.maxSnp)
return (end, self.haplotype.getSubsetHaplotype(start=start,end=end))
class HaplotypeLibraryFrom(object):
def __init__(self, individuals=[], coreLengths=[100]):
self.coreLengths = coreLengths
self.offsets = [0, .5]
self.haplotypes = dict()
for ind in individuals:
self.add(ind.haplotypes[0])
self.add(ind.haplotypes[1])
def add(self, hap):
hapIter = HaplotypeIterator(self.coreLengths, self.offsets, hap)
for pair in hapIter:
index, subHap = pair
if index not in self.haplotypes :
self.haplotypes[index] = []
if subHap.countMissing() == 0:
try:
ii = self.haplotypes[index].index(subHap)
self.haplotypes[index][ii].incrementWeight()
# print("weight increased", self.haplotypes[index][ii].weight)
except ValueError:
# print("Hap added")
self.haplotypes[index].append(subHap)
def __iter__(self):
for key, value in self.haplotypes.items():
for v in value:
yield v
def toList(self):
haps = self.haplotypes.values()
return list(itertools.chain(*haps))
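# Usage sketch for HaplotypeLibraryFrom (assumes PyHaplotype accepts a 0/1/9 sequence
# the way the pure-Python Haplotype class does; the values below are arbitrary):
# library = HaplotypeLibraryFrom(individuals=[], coreLengths=[4])
# library.add(PyHaplotype([0, 1, 1, 0, 1, 0, 0, 1]))
# library.add(PyHaplotype([0, 1, 1, 0, 1, 0, 0, 1]))  # an identical, fully phased core increments its weight
# for subHap in library:
#     print(subHap)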
# class test(object) :
# def __init__(self, ids) :
# self.ids = ids
# def __iter__(self) :
# for idx in self.ids :
# yield idx
# aa = test([1, 2, 3, 4])
# for idx in aa:
# print(idx)
|
AlphaHousePython
|
/alphahousepython-1.0.6-cp36-cp36m-macosx_10_6_x86_64.whl/alphahousepython/src/HaplotypeLibrary.py
|
HaplotypeLibrary.py
|
import logging
import numpy as np
from bitarray import bitarray
import Haplotype
import Utils
from Constants import MEMORYSAVE,MISSINGGENOTYPECODE,MISSINGPHASECODE
def checkValid(val):
if (val == MISSINGGENOTYPECODE or val == 0 or val == 2 or val == 1):
return True
return False
class NoGenotypeException(Exception):
"""Exception to be thrown when there is no genotype information available"""
def __init__(self):
Exception.__init__(self, "No Genotype Information Present")
class IncorrectHaplotypeNumberError(Exception):
"""Exception thrown when there is an incorrect number of haplotypes given (2 is usually expected)"""
def __init__(self):
Exception.__init__(self, "Incorrect number of haplotypes present")
class Genotype(object):
# !Reminder as to how the data is stored...
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ! Genotype Homo Additional !
# ! 0 1 0 !
# ! 1 0 0 !
# ! 2 1 1 !
# ! Missing 0 1 !
def __init__(self, array = None,haplotypes = None, lock= False, length=0):
# if cython.compiled:
# print("Yep, I'm compiled.")
# else:
# print("Just a lowly interpreted script.")
self.hasLock = False
self.array = None
if (not haplotypes is None):
# first check if we should initialise the genotypes from the haplotypes
self.__initFromHaplotypes(haplotypes)
elif (not array is None):
# otherwise, try to use the array
if not MEMORYSAVE:
self.array = np.array(array,dtype=np.int8)
else:
self.__initFromArray(array)
else:
# otherwise initialise an empty to an array
missing = np.full(length,fill_value=9,dtype=np.int8)
if MEMORYSAVE:
self.__initFromArray(missing)
else:
self.array = np.array(missing,dtype=np.int8)
if (lock):
self.hasLock = True
self.locked = bitarray(len(self))
for i in range(0,len(self)):
self.locked[i] = not self.isMissing(i)
def __initFromArray(self, array):
self.homo = bitarray(len(array))
self.additional = bitarray(len(array))
for i in range(0, len(array)):
val = int(array[i])
if (val == 0):
self.homo[i] = True
self.additional[i] = False
elif (val == 1):
self.homo[i] = False
self.additional[i] = False
elif (val == 2):
self.homo[i] = True
self.additional[i] = True
else: # must be missing
self.additional[i] = True
self.homo[i] = False
def __initFromHaplotypes(self, haplotypes :[Haplotype]) -> 'Genotype':
"""Function populates an array based on what values the haplotypes given have"""
if (len(haplotypes) != 2):
raise IncorrectHaplotypeNumberError
length = min(len(haplotypes[0]), len(haplotypes[1]))
array = np.full(length,fill_value=9,dtype=np.int8)
for i,(one,two) in enumerate(zip(haplotypes[0].toIntArray(), haplotypes[1].toIntArray())):
if ((one == 1) and (two == 1)):
array[i] = 2
elif ((one == 0) and (two == 0)):
array[i] = 0
elif (one == MISSINGGENOTYPECODE or two == MISSINGGENOTYPECODE):
# if either is missing - set to missing
array[i] = MISSINGGENOTYPECODE
else:
# if one is 1, and the other is 0
array[i] = 1
self.__init__(array)
def __eq__(self, other : object) -> bool:
if (isinstance(other,Genotype)):
if MEMORYSAVE:
if (self.homo != other.homo or self.additional != other.additional):
return False
else:
return np.array_equal(self.array, other.array)
else:
return False
return True
def __len__(self) -> int:
if MEMORYSAVE:
return len(self.homo)
else:
return len(self.array)
def __str__(self) -> str:
if self.array is not None:
string = " "
string = string.join(str(x) for x in self.array)
else:
string = " "
string = string.join(str(x) for x in self)
return string
def __getitem__(self, item) :
if isinstance(item, slice):
return [self[ii] for ii in range(*item.indices(len(self)))]
elif isinstance(item, list):
return [self[ii] for ii in item]
elif isinstance(item, int):
if item < 0 or item >= len(self):
raise IndexError("The index (%d) is out of range." % item)
else:
if self.array is not None:
return self.array[item]
else:
return self.getGenotype(item)
def __setitem__(self, key, value :int):
if MEMORYSAVE:
self.setGenotype(key,value)
else:
self.setGenotypeArray(key,value)
def __iter__(self) -> iter:
self.n = 0
for i in range(0, len(self)):
yield (self[i])
# yield self.getGenotype(self.n)
def getGenotype(self, pos : int) -> int:
if not MEMORYSAVE:
raise NotImplementedError("ERROR - get genotype should only be called in verbose memory mode")
hom = self.homo[pos]
add = self.additional[pos]
if (hom and add):
return 2
elif (hom):
return 0
elif(add):
return 9
else:
return 1
def append(self, value : int):
self[len(self)] = value
def setGenotypeArray(self, pos, value):
if not checkValid(value):
value = MISSINGGENOTYPECODE
if (pos < len(self)):
self.array[pos] = value
if (pos == len(self)):
self.array = np.append(self.array,[value])
if (self.hasLock):
self.locked.append(Utils.bitMap[False])
def setGenotype(self, pos : int, value : int):
"""sets the value of a genotype at a given position"""
if not MEMORYSAVE:
raise NotImplementedError("ERROR - set genotype should only be called in verbose memory mode")
value = int(value)
if self.array is not None and pos < len(self.array):
self.array[pos] = value
if value == 0:
hom = True
add = False
elif value == 1:
hom = False
add = False
elif value == 2:
hom= True
add= True
else:
hom = False
add = True
if (pos == len(self)):
self.homo.append(Utils.bitMap[hom])
self.additional.append(Utils.bitMap[add])
if (self.hasLock):
self.locked.append(Utils.bitMap[False])
else:
self.homo[pos] = hom
self.additional[pos] = add
def toIntArray(self):
if self.array is not None:
return self.array
ret = np.empty(len(self), np.int8)
c = 0
for hom,add in zip(self.homo, self.additional):
if (hom and add):
ret[c] = 2
elif (hom):
ret[c] = 0
elif (add):
ret[c] = 9
else:
ret[c] = 1
c+=1
return ret
def isHaplotypeCompatable(self, hap: Haplotype, threshold= 0) -> bool:
"""Returns true if number of mismatches are less than threshold"""
if MEMORYSAVE:
res = ((self.homo & ~self.additional) & (hap.phase & ~ hap.missing)) \
| ((self.homo & self.additional) & (~hap.phase & ~hap.missing))
x = res.count(True)
else:
x = 0
if (len(self) != len(hap)):
raise IndexError("Genotype and Haplotype are different lengths ")
for i in range(0,len(self)):
if(self[i] == 0):
if not (hap[i] == 0 or hap[i] == MISSINGPHASECODE):
x += 1
elif(self[i] == 2):
if not (hap[i] == 1 or hap[i] == MISSINGPHASECODE):
x += 1
return x <= threshold
def numHet(self) -> int:
"""Returns the number of Heterozygous snps"""
if MEMORYSAVE:
return (~self.homo & ~self.additional).count(True)
else:
return (self.array == 1).sum()
def countMissing(self) -> int:
if MEMORYSAVE:
return (~self.homo & self.additional).count(True)
else:
return (self.array == 9).sum()
def percentageMissing(self) -> float:
return self.countMissing() / len(self)
def countNotMissing(self):
return len(self) - self.countMissing()
def countNotEqual(self, other : 'Genotype') -> int:
if MEMORYSAVE:
return ((self.homo ^ other.homo) | (self.additional ^ other.additional)).count(True)
else:
return (np.not_equal(self.array, other.array) == True).sum()
def countNotEqualExcludeMissing(self, other : 'Genotype') -> int:
if MEMORYSAVE:
return (((self.homo ^ other.homo) | (self.additional ^ other.additional)) & ((self.homo | ~self.additional) & (other.homo | ~other.additional))).count(True)
else:
count = 0
for i in range(0, len(self)):
if not (self[i] == MISSINGGENOTYPECODE or other[i] == MISSINGGENOTYPECODE):
if (self[i] != other[i]):
count+=1
return count
def countMismatches(self, other: 'Genotype') -> int:
"""Counts the number of opposing homozygotes between the two genotypes
Parameters
------------
other : Genotype
Genotype to be compared with
"""
if MEMORYSAVE:
x = ((self.homo & other.homo) & (self.additional ^ other.additional))
return x.count(True)
else:
count = 0
for i in range(0, len(self)):
if self[i] == 0:
if other[i] == 2:
count += 1
elif self[i] == 2:
if other[i] == 0:
count += 1
return count
def isMissing(self, pos):
if self[pos] == MISSINGGENOTYPECODE:
return True
return False
def getSubsetGenotype(self, startPos = 0, endPos = 0):
"""Returns a subset genotype object
Parameters
------------
startPos : int
starting position of new genotype
endPos : int
| end position
"""
if (endPos == 0):
endPos = len(self) - 1
if (endPos > len(self) - 1):
logging.exception("WARNING subset given size too big as end position")
endPos = len(self) - 1
if MEMORYSAVE:
empty = np.full((endPos - startPos), MISSINGGENOTYPECODE)
g = Genotype(empty)
g.homo = self.homo[startPos:endPos]
g.additional = self.additional[startPos:endPos]
else:
g = Genotype(self.array[startPos:endPos])
return g
def complement(self,h:Haplotype) -> Haplotype:
if (len(self) != len(h)):
raise IndexError("Genotype and Haploype are different lengths ")
if MEMORYSAVE:
phase = bitarray(len(self))
missing = bitarray(len(self))
y =(h.phase & ~h.missing)
x = ((y & self.homo))
phase = ((h.phase & h.missing) | (((h.phase & ~h.missing) & self.homo) | (~(h.phase | h.missing) & ~(self.homo ^ self.additional))))
missing = (h.missing | (self.additional & ~self.homo)| ((~h.phase & self.additional) & h.phase & (self.homo ^ self.additional )))
return Haplotype.Haplotype(phase=phase, missing= missing)
else:
array = np.full(len(self), MISSINGPHASECODE)
for i in range(0, len(self)):
if (self[i] == 0):
array[i] = 0
elif (self[i] == 2):
array[i] = 1
elif (self[i] == 1):
if (h[i] ==1):
array[i] = 0
elif (h[i] == 0):
array[i] = 1
return Haplotype.Haplotype(array)
def setFromHaplotypesIfMissing(self, h1: Haplotype,h2: Haplotype):
tempG = Genotype(haplotypes=(h1,h2))
self.setFromOtherIfMissing(tempG)
def setFromOtherIfMissing(self,other : 'Genotype'):
if (len(self) != len(other)):
raise IndexError("Genotype are different lengths ")
if MEMORYSAVE:
origHom = self.homo
self.homo = (((self.homo | ~self.additional) & self.homo) | ((~self.homo & self.additional) & other.homo))
self.additional = ((( origHom | ~self.additional) & self.additional) | ( ( ~origHom & self.additional) & other.additional ))
else:
for i in range(0, len(self)):
if self[i] == 9:
self[i] = other[i]
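# Usage sketch (illustrative only): builds a genotype from a 0/1/2/9 array, derives
# the complementary haplotype, and counts opposing homozygotes between two genotypes.
#   g = Genotype([0, 1, 2, 9])
#   g.toIntArray()                    # array([0, 1, 2, 9]); 9 marks a missing genotype
#   g.countMissing(), g.numHet()      # 1, 1
#   h = Haplotype.Haplotype([0, 1, 1, 9])
#   g.complement(h).toIntArray()      # the phase implied by g together with h
#   other = Genotype([2, 1, 0, 0])
#   g.countMismatches(other)          # 2 opposing homozygotes (positions 0 and 2)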
|
AlphaHousePython
|
/alphahousepython-1.0.6-cp36-cp36m-macosx_10_6_x86_64.whl/alphahousepython/src/Genotype.py
|
Genotype.py
|
import csv
import re
import typing
from collections import OrderedDict
from enum import Enum
from typing import *
from gen import PyGenotype
from hap import PyHaplotype, compareHapsOnIntersectArray
import numpy as np
import pandas as pd
try:
from plinkio import plinkfile
except ImportError:
use_plink = False
import Utils as Utils
# from Genotype import Genotype,NoGenotypeException
# from Haplotype import Haplotype, compareHapsOnIntersect,compareHapsOnIntersectArray
from bitarray import bitarray
import HaplotypeLibrary as HaplotypeLibrary
from Constants import MEMORYSAVE,MISSINGGENOTYPECODE,MISSINGPHASECODE
NOPARENT = '0'
try:
import numba as nb
from numba import jit, int8, int32, boolean, jitclass
except ImportError:
use_numba = False
from _faker import jit, int8, int32, boolean, jitclass
# "ctypedef" assigns a corresponding compile-time type to DTYPE_t. For
# every type in the numpy module there's a corresponding compile-time
# type with a _t-suffix.
#####THIS SHOULD NOT BE HERE!!! (but I'm sticking it here anyways...)
class Gender(Enum):
"""SIMPLE ENUM CLASS SHOWING GENDER"""
MALE = 1
FEMALE = 2
UNKNOWN = 9
class Inconsistencies:
"""Class for representing mendelian inconsistencies, in terms of parental inconsistencies"""
def __init__(self, g, pg, mg):
if MEMORYSAVE:
het = (~ (g.homo | g.additional)) & ((pg.homo & mg.homo) & (~pg.additional ^ mg.additional))
self.maternalInconsistent = (het | ((g.homo & mg.homo) & (g.additional ^ mg.additional)))
self.paternalInconsistent = (het | ((g.homo & pg.homo) & (g.additional ^ pg.additional)))
patPres = pg.homo | ~pg.additional
matPres = mg.homo | ~mg.additional
indPres = g.homo | ~g.additional
else:
self.maternalInconsistent = bitarray(len(g))
self.paternalInconsistent = bitarray(len(g))
patPres = bitarray(len(g))
matPres = bitarray(len(g))
indPres = bitarray(len(g))
for i in range(0, len(g)):
if (g[i] != MISSINGGENOTYPECODE):
indPres[i] = True
else:
indPres[i] = False
if (pg[i] != MISSINGGENOTYPECODE):
patPres[i] = True
else:
patPres[i] = False
if (mg[i] != MISSINGGENOTYPECODE):
matPres[i] = True
else:
matPres[i] = False
# corresponds to HET step above
if (g[i] == 1 and ((pg[i] == 0 and mg[i] == 2) or (pg[i]==2 and mg[i] == 0 ))):
self.maternalInconsistent[i] = True
self.paternalInconsistent[i] = True
continue
if (g[i] == 0 and pg[i] == 2) or (g[i] == 2 and pg[i] == 0):
self.paternalInconsistent[i] = True
if (g[i] == 0 and mg[i] == 2) or (g[i] == 2 and mg[i] == 0):
self.maternalInconsistent[i] = True
self.individualInconsistent = self.paternalInconsistent | self.maternalInconsistent
self.paternalConsistent = (indPres & patPres) & (~ self.paternalInconsistent)
self.maternalConsistent = (indPres & matPres) & (~self.maternalInconsistent)
self.individualConsistent = (indPres & matPres & patPres) & ~self.individualInconsistent
class Individual:
def __init__(self, id, sireId, damId, founder, recId, gender = Gender.UNKNOWN,
genotype = None,
haplotypes = None, initToSnps= 0):
"""Default constructor of Individual"""
self.id = id
self.recId = recId
self.sireId = sireId
self.damId = damId
self.offsprings = []
self.sire = None
self.dam = None
self.founder = founder
self.generation = 0
self.genotype = genotype
self.haplotypes = haplotypes
self.inconsistenciesCount = 0
self.inconsistencies = None
self.mend = None
self.reads = []
self.gender = gender
self.weight = 0
if (self.haplotypes is None):
self.haplotypes = []
if (initToSnps != 0):
missingArray = np.full(initToSnps, 9)
if (len(self.haplotypes) == 0):
self.haplotypes.append(PyHaplotype(missingArray))
self.haplotypes.append(PyHaplotype(missingArray))
else:
for i in range(len(self.haplotypes[0]), initToSnps):
self.haplotypes[0][i] = 9 # extends haplotype object
self.haplotypes[1][i] = 9 # extends haplotype object
if (self.genotype is None):
self.genotype = PyGenotype(missingArray)
else:
for i in range(len(self.genotype), initToSnps):
self.genotype[i] = 9
def getInconsistencies(self) -> Optional[Inconsistencies]:
if (self.genotype is None):
pass
self.inconsistencies = np.zeros(len(self.genotype))
if (not (self.sire is None or self.dam is None)):
self.mend = Inconsistencies(self.genotype, self.sire.genotype, self.dam.genotype)
return self.mend
return None
def getParentBasedOnIndex(self, index) -> 'Individual':
if (index == 0):
return self.sire
elif (index == 1):
return self.dam
def indivParentHomozygoticFillIn(self, hetGameticStatus=None, homGameticParent=0):
if not self.founder:
#
for e in range(0, 2):
parent = self.getParentBasedOnIndex(e)
tmpG = PyGenotype(haplotypes=parent.haplotypes)
self.haplotypes[e].setFromGenotypeIfMissing(tmpG)
self.makeIndividualGenotypeFromPhase()
def __hash__(self):
"""Returns the hash of the id"""
return self.id.__hash__()
# def __dict__(self) -> dict:
#
# ret = {}
# ret["id"] = self.id
# ret["recI"] = self.recId
# ret["sireId"] = self.sireId
# ret["damId"] = self.damId
# ret["nOffs"] = len(self.offsprings)
# #do all offsprings
# # currently doesn't do genotypes and haplotypes
def __eq__(self, o: object) -> bool:
if (not isinstance(o, Individual)):
return False
if (not self.id == o.id):
return False
if (not self.recId == o.recId):
return False
if (not self.sireId == o.sireId):
return False
if (not self.damId == o.damId):
return False
return True
def getParentArray(self) -> []:
return (self.sire, self.dam)
def __str__(self) -> str:
return self.id + " " + self.sireId + " " + self.damId + '\n'
def addOffspring(self, offspring: 'Individual'):
self.offsprings.append(offspring)
if (offspring.sireId == self.id):
offspring.sire = self
self.gender = Gender.MALE
elif (offspring.damId == self.id):
offspring.dam = self
self.gender = Gender.FEMALE
else:
print("ERROR - animal is being given offspring that is unrelated")
def writeOutPhase(self) -> str:
'''Prints out the phase in alphasuite format
This is a two line per individual format
e.g.
ANIMID 1 0 0 1
ANIMID 1 0 1 0
'''
string = ""
if (len(self.haplotypes) != 0):
string += self.id + " " + str(self.haplotypes[0]) + '\n'
string += self.id + " " + str(self.haplotypes[1]) + '\n'
else:
string += self.id + '\n'
string += self.id + '\n'
return string
def writeOutGenotypes(self) -> str:
"""returnsthe genotype information as a string per individual
If no genotype is present, tries to create genotype information if haplotypes are present"""
string = ""
if (len(self.haplotypes) == 0 and self.genotype == None):
return self.id + '\n'
elif (self.genotype == None):
self.genotype = np.full(1, fill_value=9, dtype=np.int8)
# self.genotype = PyGenotype(haplotypes=self.haplotypes)
string += self.id + " " + str(self.genotype) + '\n'
return string
def removeOffspring(self, offspring: 'Individual', dummy=None):
"""Removes offspring object, and fixes pointer fields in offspring"""
self.offsprings.remove(offspring)
if (offspring.sireId == self.id):
offspring.sire = dummy
if (dummy is not None):
offspring.sireId = dummy.id
else:
offspring.sireId = '0'
elif (offspring.damId == self.id):
offspring.dam = dummy
if (dummy is not None):
offspring.damId = dummy.id
else:
offspring.damId = '0'
else:
print("ERROR - animal is being given offspring that is unrelated")
def makeIndividualPhaseCompliment(self):
comp2 = self.genotype.complement(self.haplotypes[0])
comp1 = self.genotype.complement(self.haplotypes[1])
self.haplotypes[0].setFromOtherIfMissing(comp1)
self.haplotypes[1].setFromOtherIfMissing(comp2)
def makeIndividualGenotypeFromPhase(self):
if self.genotype is None:
self.genotype = PyGenotype(length=len(self.haplotypes[0]))
self.genotype.setFromHaplotypesIfMissing(self.haplotypes[0], self.haplotypes[1])
@jit
def indivHomoFillIn(self):
for i, val in enumerate(self.genotype):
if (val == 2):
self.haplotypes[0][i] = 1
self.haplotypes[1][i] = 1
if (val == 0):
self.haplotypes[0][i] = 0
self.haplotypes[1][i] = 0
def getPercentageNonMissingHaps(self) -> tuple:
return (1 - self.haplotypes[0].percentageMissing(),1 - self.haplotypes[1].percentageMissing())
def isFullyPhased(self) -> bool:
hap1, hap2 = self.getPercentageNonMissingHaps()
return hap1 + hap2 == 2
def isHD(self, HDTHRESHOLD = 0.9) -> bool:
return self.getPercentageNonMissingGeno() > HDTHRESHOLD
def getPercentageNonMissingGeno(self) -> float:
return 1 - self.genotype.percentageMissing()
def randomlyPhasedMidPoint(self):
if self.genotype is None: raise NoGenotypeException
mid = len(self.genotype) /2
index = -1
for i in range(0, len(self.genotype)-int(mid)-1):
if self.genotype[int(mid) + i] == 1:
index = int(mid) + i
break
if self.genotype[int(mid) - i] == 1:
index = int(mid) - i
break
if index != -1 :
self.haplotypes[0][index] = 0
self.haplotypes[1][index] = 1
areDifferent = compareHapsOnIntersectArray(self.haplotypes[1], self.haplotypes[0])
# print("Post Phasing", areDifferent.matching, areDifferent.nonMatching)
# if index == -1:
# print("No midpoint found")
class DummyIndiv(Individual):
"""Individual that is defined as a DUMMY"""
def __init__(self):
super(DummyIndiv, self).__init__('0', '0', '0', True, 0)
class Pedigree:
"""Pedigree
Class contains individual objects, as well as ordered dictionary of genotyped individuals
Also contains founders (which is also an ordered dictionary)
"""
def __init__(self, file= None, plinkFile= None, array = None,
genotypeArray = None, initToSnps= 0):
self.individuals = OrderedDict()
self.genotypedIndividuals = OrderedDict()
self.founders = OrderedDict()
self.generations = []
self.sorted = False
if plinkFile is not None:
self.__initFromPlinkFile(plinkFile)
elif file is not None:
self.__initFromFile(file, genotypeArray, initToSnps)
elif array is not None:
self.__initFromArray(array, genotypeArray, initToSnps)
def __initFromFile(self, file, genotypes=None, initToSnps=0):
i = open(file, 'r')
array = [] # should be array of string tuple
# add 0 in to state that pedigree is ordered
for l in i:
s = l.rstrip().split(" ")
s = list(filter(lambda name: name.strip(), s))
array.append(s)
i.close()
self.__initFromArray(array, genotypes, initToSnps)
def __initFromArray(self, animal_array, genotypes= None, initToSnps=0):
"""Inits pedigree from array"""
zero = DummyIndiv()
self['0'] = zero
count = 0
animals = [] # list should be a tuple
temp = []
gCount = 0
for s in animal_array:
Founder = False
if (s[1] == NOPARENT and s[2] == NOPARENT):
Founder = True
g = None
if (genotypes is not None):
g = PyGenotype(genotypes[gCount])
gCount += 1
elif initToSnps != 0: # if in
g = PyGenotype(np.full(initToSnps, 9))
animal = Individual(s[0], s[1], s[2], Founder, count, genotype=g)
if (g is not None):
self.genotypedIndividuals[s[0]] = animal
if Founder:
self.founders[animal.id] = animal
elif (animal.sireId in self.individuals and animal.damId in self.individuals):
animals.append(animal)
self[animal.sireId].addOffspring(animal)
self[animal.damId].addOffspring(animal)
else:
temp.append(animal)
self[s[0]] = animal # actually a anim to ped
for e in temp:
if (e.sireId in self.individuals):
self[e.sireId].addOffspring(e)
elif (e.sireId != NOPARENT):
self[e.sireId] = Individual(e.sireId, NOPARENT, NOPARENT, True, len(self) + 1)
self[e.sireId].addOffspring(e)
else:
print(("ERROR in ped - contact developer"))
if (e.damId in self.individuals):
self[e.damId].addOffspring(e)
elif (e.damId != NOPARENT):
self[e.damId] = Individual(e.damId, NOPARENT, NOPARENT, True, len(self) + 1)
self[e.damId].addOffspring(e)
# e.damId = '0'
# e.founder = True
# self.founders.append(e)
def __initFromPlinkFile(self, fileName: str, startSnp= None, endSnp= None):
pf = plinkfile.open(fileName)
samples = pf.get_samples()
locus = pf.get_loci()
nSnp = len(locus)
nInd = len(samples)
if (startSnp is None):
startSnp = 0
if (endSnp is None):
endSnp = nSnp
array = []
for ind in samples:
a = [ind.iid, ind.father_iid, ind.mother_iid]
array.append(a)
values = np.zeros((nInd, endSnp - startSnp))
for snpIndex in range(startSnp - 1):
pf.next()
for snpIndex in range(endSnp - startSnp):
if snpIndex % 1000 == 0: print("read %s out of %s" % (snpIndex, endSnp - startSnp + 1))
row = np.array(pf.next())
maf = np.mean(row)
values[:, snpIndex] = np.array(row) - maf # Maf Centered
self.__initFromArray(animal_array=array, genotypes=values)
def __str__(self) -> str:
arr = ([str(item) for item in list(self.individuals.values())])
return ''.join(arr)
def getPhaseAsString(self) -> str:
"""gets phase information for all non dummy animals as a string"""
arr = (
[item.writeOutPhase() for item in list(self.individuals.values()) if (not (isinstance(item, DummyIndiv)))])
return ''.join(arr)
def writeOutPhase(self, file):
"""Writes out phase information to file"""
f = open(file, 'w')
for i in self:
if (isinstance(i, DummyIndiv)): continue
f.write(i.writeOutPhase())
f.close()
def getGenotypesAsString(self) -> str:
arr = (
[item.writeOutGenotypes() for item in list(self.individuals.values()) if (not (isinstance(item, DummyIndiv)))])
return ''.join(arr)
def writeOutGenotypes(self, file):
"writes out genotype informations to a file"
f = open(file, 'w')
for i in self:
if (isinstance(i, DummyIndiv)): continue
f.write(i.writeOutGenotypes())
f.close()
def getGenotypesAll(self) -> np.ndarray:
"""Function returns all genotypes in an array
:return np.ndarray[indviduals, snps]
"""
ret = np.zeros((len(self.individuals), len(self[0].genotype)))
for row, ind in enumerate(self.individuals.values()):
ret[row, :] = ind.genotype.toIntArray()
return ret
def getGenotypes(self) -> np.ndarray:
"""Gets 3D array of genotypes"""
ret = np.zeros((len(self.genotypedIndividuals), len(self.genotypedIndividuals[0].genotype)))
for ind in self.genotypedIndividuals:
np.append(ret, ind.genotype.toIntArray())
return ret
def getNumIndivs(self) -> int:
"""DUPLICATE OF LENGTH"""
# TODO Why is this here???
return len(self)
def __len__(self) -> int:
"""returns number of individuals stored in pedigree object"""
return len(self.individuals)
def __eq__(self, other: 'Pedigree'):
"""compares the pedigree in terms of individuals within the pedigree"""
if (len(self) != len(other)):
return False
for i, b in zip(self.individuals, other.individuals):
if (i != b):
return False
return True
def __iter__(self) -> Iterator[Individual]:
"""Iterates through individuals and yields individual items """
iter = self.individuals.items().__iter__()
for i in iter:
yield i[1]
def __getitem__(self, item) -> Union[Individual,List]:
"""gets individuals"""
if isinstance(item, list):
return [self.individuals[i] for i in item]
elif isinstance(item, slice):
return [self[ii] for ii in range(*item.indices(len(self)))]
elif isinstance(item, str):
return self.individuals[item]
else:
try:
return self.individuals[str(item)]
except ValueError:
raise TypeError("Wrong type given for getter:" + str(type(item)))
def __setitem__(self, key: str, value: Individual):
"""Sets individuals """
if (not key in self.individuals):
value.recId= len(self.individuals)
self.sorted = False
self.individuals[key] = value
if (value.founder):
self.founders[value.id] = value
def setIndividualAsGenotyped(self, id: str):
ind = self[id]
if (not ind.id in self.genotypedIndividuals):
self.genotypedIndividuals[ind.id] = ind
def addGenotypesFromFile(self, file: str, initAll = False):
"""Adds genotype information to corresponding animals in pedigree from file
will overwrite corresponding information already present
        if initAll is True, animals that still have no genotypes are initialised with missing genotype and haplotype codes
"""
size = 0
# with open(file, 'r').readlines() as f:
f = open(file, 'r')
for l in f.readlines():
t = l.strip().split(" ")
if (not t[0] in self.individuals):
print("Warning animal %s not in Pedigree, will be added as founder"), t[0]
self[t[0]] = Individual('0', '0', '0', True, 0)
g = PyGenotype(np.array(t[1:],dtype=int))
self.individuals[t[0]].genotype = g
self.individuals[t[0]].inconsistencies = np.zeros(len(g))
self.genotypedIndividuals[t[0]] = self[t[0]]
if (size == 0):
size = len(t) - 1
f.close()
if initAll:
empty = np.full(size, fill_value=MISSINGGENOTYPECODE)
for ind in self:
if ind.genotype is None:
ind.genotype = PyGenotype(empty)
ind.inconsistencies = np.zeros(size)
if (len(ind.haplotypes) == 0):
ind.haplotypes.append(PyHaplotype(empty))
ind.haplotypes.append(PyHaplotype(empty))
def compareToTrueFile(self, file: str, ids = None):
"""Returns yield and accuracy of genotype information for pedigree compared to a specified true file"""
# with open(file, 'r').readlines() as f:
if ids is None: ids = [ind for ind in self.individuals.keys()]
f = open(file, 'r')
yieldCount = 0
diffs = 0
totalLength = 0
for l in f.readlines():
t = l.strip().split(" ")
g = PyGenotype(np.array(t[1:],dtype=int))
if (t[0] in ids and t[0] in self.individuals):
comp = self.individuals[t[0]].genotype
yieldCount += comp.countNotMissing()
totalLength += len(comp)
diffs += comp.countNotEqualExcludeMissing(g)
yieldRet = float(yieldCount) / totalLength
accuracy = 1 - float(diffs) / yieldCount
f.close()
return (yieldRet, accuracy)
def compareToTrueFilePhase(self, file: str, ids = None):
"""Returns yield and accuracy of genotype information for pedigree compared to a specified true file"""
# with open(file, 'r').readlines() as f:
if ids is None: ids = [ind for ind in self.individuals.keys()]
f = open(file, 'r')
yieldCount = 0
diffs = 0
totalLength = 0
lines = dict()
for l in f.readlines():
t = l.strip().split(" ")
g = PyHaplotype(t[1:])
if (t[0] in ids and t[0] in self.individuals):
if t[0] not in lines :
lines[t[0]] = g
else:
g0 = lines.pop(t[0]) #This also removes it from the dict.
g1 = g
comp0 = self.individuals[t[0]].haplotypes[0]
comp1 = self.individuals[t[0]].haplotypes[1]
yieldCount += comp0.countNotMissing()
yieldCount += comp1.countNotMissing()
totalLength += len(comp0) + len(comp1)
diffsAlign = comp0.countNotEqualExcludeMissing(g0) + comp1.countNotEqualExcludeMissing(g1)
diffsNot = comp0.countNotEqualExcludeMissing(g1) + comp1.countNotEqualExcludeMissing(g0)
diffs += min(diffsAlign, diffsNot)
yieldRet = float(yieldCount) / totalLength
if yieldCount != 0 : accuracy = 1 - float(diffs) / yieldCount
else: accuracy = -1
f.close()
return (yieldRet, accuracy)
def addGenotypesFromFileFast(self, file: str, initAll = False):
size = 0
# with open(file, 'r').readlines() as f:
f = open(file, 'r')
x = np.loadtxt(file, dtype=str)
for t in x:
# t = l.strip().split(" ")
if (not t[0] in self.individuals):
print("Warning animal %s not in Pedigree, will be added as founder"), t[0]
self[t[0]] = Individual('0', '0', '0', True, 0)
g = PyGenotype(t[1:])
self.individuals[t[0]].genotype = g
self.individuals[t[0]].inconsistencies = np.zeros(len(g))
self.genotypedIndividuals[t[0]] = self[t[0]]
if (size == 0):
size = len(t) - 1
f.close()
# if initAll:
# empty = np.full(size, fill_value=MISSINGGENOTYPECODE)
# for ind in self:
# if ind.genotype is None:
# ind.genotype = Genotype(empty)
# ind.inconsistencies = np.zeros(size)
# if (len(ind.haplotypes) == 0):
# ind.haplotypes.append(Haplotype(empty))
# ind.haplotypes.append(Haplotype(empty))
def addGenotypesFromArray(self, array: np.ndarray, initAll= False):
size = 0
for t in array:
if (not t[0] in self.individuals):
print("Warning animal %s not in Pedigree, will be added as founder" % t[0])
self[t[0]] = Individual('0', '0', '0', True, 0)
g = PyGenotype(t[1:])
self.individuals[t[0]].genotype = g
self.individuals[t[0]].inconsistencies = np.zeros(len(g))
self.genotypedIndividuals[t[0]] = self[t[0]]
if (size == 0):
size = len(t[1:])
if initAll:
empty = np.full(size, fill_value=MISSINGGENOTYPECODE)
for ind in self:
if ind.genotype is None:
ind.genotype = PyGenotype(empty)
ind.inconsistencies = np.zeros(size)
def addPhaseFromFile(self, file: str, initAll= False, overwrite=False):
'''Adds phase information from alphasuite format file'''
size = 0
with open(file, 'r') as f:
for l in f:
t = re.split('\s+', l.strip())
if (not t[0] in self.individuals):
print("Warning animal %s not in Pedigree, will be added as founder" % t[0])
self[t[0]] = Individual('0', '0', '0', True, 0)
h = PyHaplotype(t[1:])
ind = self.individuals[t[0]]
if overwrite and len(ind.haplotypes) == 2:
ind.haplotypes = []
ind.haplotypes.append(h)
if (size == 0):
size = len(t[1:])
if initAll:
empty = np.full(size, fill_value=MISSINGGENOTYPECODE)
for ind in self:
if ind.haplotypes[0] is None:
ind.haplotypes.insert(0, PyHaplotype(empty))
ind.haplotypes.insert(1, PyHaplotype(empty))
def addPhaseFromArray(self, array: np.ndarray, initAll = False):
size = 0
hCount = 0
for t in array:
if (not t[0] in self.individuals):
print("Warning animal %s not in Pedigree, will be added as founder" % t[0])
self[t[0]] = Individual('0', '0', '0', True, 0)
h = PyHaplotype(t[1:])
if (hCount == 2): hCount = 0
self.individuals[t[0]].haplotypes[hCount] = h
hCount += 1
if (size == 0):
size = len(t[1:])
if initAll:
empty = np.full(size, fill_value=MISSINGGENOTYPECODE)
for ind in self:
if ind.haplotypes[0] is None:
ind.haplotypes[0] = PyHaplotype(empty)
ind.haplotypes[1] = PyHaplotype(empty)
def getMatePairs(self: 'Pedigree'):
"""Gets pairs of mates that are in the pedigree"""
checked = {}
        for ind in self.individuals.values():
for off in ind.offsprings:
if off.sire == ind:
pair = Utils.generateParing(ind.recId, off.dam.recId)
if pair in checked:
                        checked[pair].children.add(off)
else:
checked[pair] = Family(ind, off.dam, off)
elif off.dam == ind:
pair = Utils.generateParing(ind.recId, off.sire.recId)
if pair in checked:
                        checked[pair].children.add(off)
else:
checked[pair] = Family(off.sire, ind, off)
return checked
def sortPedigree(self: 'Pedigree') -> 'Pedigree':
"""Returns a new pedigree that is sorted """
new = Pedigree()
seen = {}
self.generations = []
new.founders = self.founders
seen['0'] = True
new['0'] = self['0']
currentGen = 0
# new['0'] = Individual('0', '0', '0', True, 0)
for i in self.founders:
self.__checkOffspring(self.founders[i], seen, self.generations, currentGen)
new.generations = self.generations
# self.individuals = OrderedDict()
for g in self.generations:
for animal in g:
new.individuals[animal.id] = animal
if (animal.id in self.genotypedIndividuals):
new.genotypedIndividuals[animal.id] = animal
new.sorted = True
return new
def findMendelianInconsistencies(self: 'Pedigree', threshold = 0.05, file = None):
"""Function computes the inconsistencies properties across the pedigree"""
snpChanges = 0
countChanges = 0
sireRemoved = False
damRemoved = False
if not self.sorted:
print("WARNING - Pedigree is not sorted - MendelianInconsistencies might be unreliable")
for ind in self:
if (ind.founder):
continue
            sireRemoved = False
            damRemoved = False
            mend = ind.mend = ind.getInconsistencies()  # reset the per-individual flags and keep the result for the correction pass below
sireIncon = mend.paternalInconsistent.count(True)
ind.sire.inconsistenciesCount += sireIncon
damIncon = mend.maternalInconsistent.count(True)
ind.dam.inconsistenciesCount += damIncon
if (float(sireIncon) / len(ind.genotype) > threshold):
countChanges += 1
ind.sire.removeOffspring(ind, self['0'])
sireRemoved = True
if (float(damIncon) / len(ind.genotype) > threshold):
countChanges += 1
ind.dam.removeOffspring(ind, self['0'])
damRemoved = True
if (not sireRemoved):
count = 0
                for i, c in zip(mend.paternalInconsistent, mend.paternalConsistent):
                    if i:
                        ind.sire.inconsistencies[count] += 1
                    if c:
                        ind.sire.inconsistencies[count] -= 1
                    count += 1
if (not damRemoved):
count = 0
for i, c in zip(mend.maternalInconsistent, mend.maternalConsistent):
if i:
ind.dam.inconsistencies[count] += 1
if c:
ind.dam.inconsistencies[count] -= 1
count += 1
for ind in self:
if ind.founder:
continue
for i in range(0, len(ind.genotype)):
if ind.mend.individualInconsistent[i]:
snpChanges += 1
if (ind.sire.inconsistencies[i] > ind.dam.inconsistencies[i]):
ind.genotype[i] = MISSINGGENOTYPECODE
ind.sire.genotype[i] = MISSINGGENOTYPECODE
elif (ind.sire.inconsistencies[i] < ind.dam.inconsistencies[i]):
ind.genotype[i] = MISSINGGENOTYPECODE
ind.dam.genotype[i] = MISSINGGENOTYPECODE
else:
ind.genotype[i] = MISSINGGENOTYPECODE
ind.sire.genotype[i] = MISSINGGENOTYPECODE
ind.dam.genotype[i] = MISSINGGENOTYPECODE
# TODO think about the case of having only one parent
def __checkOffspring(self: 'Pedigree', animal: Individual, seen: [bool], generations: [], currentGen: int):
if (not (animal.id == '0')):
seen[animal.id] = True
if currentGen > len(generations) - 1:
generations.append([])
animal.generation = currentGen
generations[currentGen].append(animal)
# file.write(animal.id +" " + animal.sireId + " " + animal.damId+'\n')
for off in animal.offsprings:
if (off.sireId in seen and off.damId in seen):
m = max(off.sire.generation, off.dam.generation)
self.__checkOffspring(off, seen, generations, m + 1)
class Family:
def __init__(self, sire: Individual, dam: Individual, off: Individual):
"""Class to define a family in the Pedigree"""
self.sire = sire
self.dam = dam
self.children = set()
self.children.add(off)
infile = "pedigree.txt"
# infile = "merge_illumina"
outfile = "ped.out"
# o = open(outfile,'w')
# # ped = Pedigree(plinkFile=infile)
# ped = Pedigree(file=infile)
# y = ped.sortPedigree()
#
# print(y)
# o.write(y.__str__())
|
AlphaHousePython
|
/alphahousepython-1.0.6-cp36-cp36m-macosx_10_6_x86_64.whl/alphahousepython/src/PedigreeHolder.py
|
PedigreeHolder.py
|
# AlphaPeel
AlphaPeel is a software package for calling, phasing, and imputing genotype and sequence data in pedigree populations. This program implements single locus peeling, multi locus peeling, and hybrid peeling. A complete description of these methods is given in <a href="https://gsejournal.biomedcentral.com/articles/10.1186/s12711-018-0438-2">Hybrid peeling for fast and accurate calling, phasing, and imputation with sequence data of any coverage in pedigrees, Genetics Selection Evolution</a>.
## User guide
See `docs/source/index.rst` or PDF in the ZIP file in this repository.
## Conditions of use
AlphaPeel is part of a suite of software that our group has developed. It is fully and freely available for all use under the MIT License.
## Suggested Citation
Whalen, A, Ros-Freixedes, R, Wilson, DL, Gorjanc, G, Hickey, JM. (2018). Hybrid peeling for fast and accurate calling, phasing, and imputation with sequence data of any coverage in pedigrees. Genetics Selection Evolution; doi: <a href="https://doi.org/10.1186/s12711-018-0438-2"> https://doi.org/10.1186/s12711-018-0438-2</a>
## Requirements
* Python 3
* NumPy
* Numba
## Installation
wget https://github.com/AlphaGenes/AlphaPeel/raw/main/AlphaPeel.zip
unzip AlphaPeel.zip
pip install AlphaPeel/AlphaPeel*.whl
## Build instructions
Run the following to build the Python wheel and the user guide. You will need an installation of [Sphinx](https://www.sphinx-doc.org) and [LaTeX](https://www.latex-project.org/get) to build the user guide.
git clone --recurse-submodules https://github.com/AlphaGenes/AlphaPeel.git
cd AlphaPeel
mamba create -n AlphaPeel
mamba activate AlphaPeel
mamba install python=3.9
pip install sphinx
./build_pipeline.sh
pip install --force-reinstall dist/AlphaPeel*.whl
cd example; ./runScript.sh
The wheel can be found in `dist/` and the PDF of the user guide at `docs/build/latex/alphaplantimpute2.pdf`
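As a minimal illustration of a run after installation (the file names below are placeholders, and this assumes the console entry point is installed as `AlphaPeel` and accepts the standard input flags described in the user guide):
AlphaPeel -genotypes genotypes.txt -pedigree pedigree.txt -runtype multi -out example_out
See the user guide for the full set of input, output, and peeling options.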
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/README.md
|
README.md
|
import numpy as np
from numba import jit, float32, int32, int64, optional
from numba.experimental import jitclass
from .tinyhouse import Pedigree
from .tinyhouse import ProbMath
from .tinyhouse import InputOutput
from .Peeling import Peeling
from .Peeling import PeelingIO
from .Peeling import PeelingInfo
from .Peeling import PeelingUpdates
import concurrent.futures
from itertools import repeat
import argparse
def runPeelingCycles(pedigree, peelingInfo, args, singleLocusMode = False):
    # Right now the MAF estimate only uses the penetrance, so it can be estimated once.
if args.estmaf: PeelingUpdates.updateMaf(pedigree, peelingInfo)
for i in range(args.ncycles):
print("Cycle ", i)
peelingCycle(pedigree, peelingInfo, args = args, singleLocusMode = singleLocusMode)
peelingInfo.iteration += 1
    # esttransitions has been disabled.
# if args.esttransitions:
# print("Estimating the transmission rate is currently a disabled option")
# PeelingUpdates.updateSeg(peelingInfo) #Option currently disabled.
if args.esterrors:
PeelingUpdates.updatePenetrance(pedigree, peelingInfo)
def peelingCycle(pedigree, peelingInfo, args, singleLocusMode = False) :
nWorkers = args.maxthreads
for index, generation in enumerate(pedigree.generations):
print("Peeling Down, Generation", index)
jit_families = [family.toJit() for family in generation.families]
if args.maxthreads > 1:
with concurrent.futures.ThreadPoolExecutor(max_workers=nWorkers) as executor:
results = executor.map(Peeling.peel, jit_families, repeat(Peeling.PEEL_DOWN), repeat(peelingInfo), repeat(singleLocusMode))
else:
for family in jit_families:
Peeling.peel(family, Peeling.PEEL_DOWN, peelingInfo, singleLocusMode)
for index, generation in enumerate(reversed(pedigree.generations)):
print("Peeling Up, Generation", len(pedigree.generations) - index - 1)
jit_families = [family.toJit() for family in generation.families]
if args.maxthreads > 1:
with concurrent.futures.ThreadPoolExecutor(max_workers=nWorkers) as executor:
results = executor.map(Peeling.peel, jit_families, repeat(Peeling.PEEL_UP), repeat(peelingInfo), repeat(singleLocusMode))
else:
for family in jit_families:
Peeling.peel(family, Peeling.PEEL_UP, peelingInfo, singleLocusMode)
sires = set()
dams = set()
for family in generation.families:
sires.add(family.sire)
dams.add(family.dam)
updatePosterior(pedigree, peelingInfo, sires, dams)
# updatePosterior updates the posterior term for a specific set of sires and dams.
# The updateSire and updateDam functions perform the updates for a specific sire
# and specific dam by including all of the information from all of their families.
# This update is currently not multithreaded. It is also currently not ideal --
# right now the posterior term is updated for all of the sires/dams no matter
# whether or not they have been changed since the last update.
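# In updateSire and updateDam below the per-family contributions are combined on a log scale:
# posterior[parent] is the sum over families of log(posterior*_new[fam]), and
# posterior*_minusFam[fam] is that sum minus the family's own contribution.
# Both are then mapped back from the log scale with expNorm1D and normalised so each locus sums to one.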
def updatePosterior(pedigree, peelingInfo, sires, dams) :
# if pedigree.mapSireToFamilies is None or pedigree.mapDamToFamilies is None:
# pedigree.setupFamilyMap()
for sire in sires:
updateSire(sire, peelingInfo)
for dam in dams:
updateDam(dam, peelingInfo)
def updateSire(sire, peelingInfo) :
famList = [fam.idn for fam in sire.families]
sire = sire.idn
peelingInfo.posterior[sire,:,:] = 0
for famId in famList:
log_update = np.log(peelingInfo.posteriorSire_new[famId,:,:])
peelingInfo.posterior[sire,:,:] += log_update
peelingInfo.posteriorSire_minusFam[famId,:,:] = -log_update
for famId in famList:
peelingInfo.posteriorSire_minusFam[famId,:,:] += peelingInfo.posterior[sire,:,:]
#Rescale values.
peelingInfo.posterior[sire,:,:] = Peeling.expNorm1D(peelingInfo.posterior[sire,:,:])
peelingInfo.posterior[sire,:,:] /= np.sum(peelingInfo.posterior[sire,:,:], 0)
for famId in famList:
peelingInfo.posteriorSire_minusFam[famId,:,:] = Peeling.expNorm1D(peelingInfo.posteriorSire_minusFam[famId,:,:])
peelingInfo.posteriorSire_minusFam[famId,:,:] /= np.sum(peelingInfo.posteriorSire_minusFam[famId,:,:], 0)
def updateDam(dam, peelingInfo) :
famList = [fam.idn for fam in dam.families]
dam = dam.idn
peelingInfo.posterior[dam,:,:] = 0
for famId in famList:
log_update = np.log(peelingInfo.posteriorDam_new[famId,:,:])
peelingInfo.posterior[dam,:,:] += log_update
peelingInfo.posteriorDam_minusFam[famId,:,:] = -log_update
for famId in famList:
peelingInfo.posteriorDam_minusFam[famId,:,:] += peelingInfo.posterior[dam,:,:]
peelingInfo.posterior[dam,:,:] = Peeling.expNorm1D(peelingInfo.posterior[dam,:,:])
peelingInfo.posterior[dam,:,:] /= np.sum(peelingInfo.posterior[dam,:,:], 0)
for famId in famList:
peelingInfo.posteriorDam_minusFam[famId,:,:] = Peeling.expNorm1D(peelingInfo.posteriorDam_minusFam[famId,:,:])
peelingInfo.posteriorDam_minusFam[famId,:,:] /= np.sum(peelingInfo.posteriorDam_minusFam[famId,:,:], 0)
def getLociAndDistance(snpMap, segMap):
nSnp = len(snpMap)
distance = np.full(nSnp, 0, dtype = np.float32)
loci = np.full((nSnp, 2), 0, dtype = np.int64)
# Assume snp map and segMap are sorted.
segIndex = 0
for i in range(nSnp) :
pos = snpMap[i]
# Move along the segMap until we reach a point where we are either at the end of the map, or where the next seg marker occurs after the position in the genotype file.
# This assumes sorting pretty heavily. Alternative would be to find the neighboring positions in the seg file for each marker in the genotype file.
while segIndex < (len(segMap)-1) and segMap[segIndex + 1] < pos :
segIndex += 1
# Now that positions are known, choose the neighboring markers and the distance to those markers.
        # The first two if statements handle the beginning and end of the chromosome.
if segIndex == 0 and segMap[segIndex] > pos :
loci[i, :] = (segIndex, segIndex)
distance[i] = 0
elif segIndex == (len(segMap)-1) and segMap[segIndex] <= pos :
loci[i, :] = (segIndex, segIndex)
distance[i] = 0
else:
loci[i,:] = (segIndex, segIndex + 1)
gap = segMap[segIndex+1] - segMap[segIndex]
            distance[i] = 1.0 - (pos - segMap[segIndex])/gap # At distance 1, only use segIndex; at distance 0, only use segIndex + 1.
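            # Worked example: with segMap = [0.0, 1.0, 2.0] and pos = 1.25 this gives loci = (1, 2) and
            # distance = 0.75, so generateSingleLocusSegregation puts 75% of the weight on the segregation
            # estimate at position 1.0 and 25% on the estimate at position 2.0.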
return (loci, distance)
def generateSingleLocusSegregation(peelingInfo, pedigree, args):
segInfo = None
if args.segfile is not None:
# This just gets the locations in the map files.
snpMap = np.array(InputOutput.readMapFile(args.mapfile, args.startsnp, args.stopsnp)[2])
segMap = np.array(InputOutput.readMapFile(args.segmapfile)[2])
loci, distance = getLociAndDistance(snpMap, segMap)
start = np.min(loci)
stop = np.max(loci)
seg = InputOutput.readInSeg(pedigree, args.segfile, start = start, stop = stop)
loci -= start # Re-align to seg file.
for i in range(len(distance)):
segLoc0 = loci[i,0]
segLoc1 = loci[i,1]
peelingInfo.segregation[:,:,i] = distance[i]*seg[:,:,segLoc0] + (1-distance[i])*seg[:,:,segLoc1]
else:
peelingInfo.segregation[:,:,:] = .25
### ACTUAL PROGRAM BELOW
def getArgs() :
parser = argparse.ArgumentParser(description='')
core_parser = parser.add_argument_group("Core arguments")
core_parser.add_argument('-out', required=True, type=str, help='The output file prefix.')
core_peeling_parser = parser.add_argument_group("Mandatory peeling arguments")
core_peeling_parser.add_argument('-runtype', default=None, required=False, type=str, help='Program run type. Either "single" or "multi".')
# Input options
input_parser = parser.add_argument_group("Input Options")
InputOutput.add_arguments_from_dictionary(input_parser, InputOutput.get_input_options(), options = ["bfile", "genotypes", "phasefile", "seqfile", "pedigree", "startsnp", "stopsnp"])
# Output options
output_parser = parser.add_argument_group("Output Options")
output_parser.add_argument('-no_dosages', action='store_true', required=False, help='Flag to suppress the dosage files.')
output_parser.add_argument('-no_seg', action='store_true', required=False, help='Flag to suppress the segregation files (e.g. when running for chip imputation and not hybrid peeling).')
output_parser.add_argument('-no_params', action='store_true', required=False, help='Flag to suppress writing the parameter files.')
output_parser.add_argument('-haps', action='store_true', required=False, help='Flag to enable writing out the genotype probabilities.')
    output_parser.add_argument('-calling_threshold', default=None, required=False, type=float, nargs="*", help='Genotype calling threshold(s). Multiple space separated values allowed. Use 0.3 for the best-guess genotype.')
output_parser.add_argument('-binary_call_files', action='store_true', required=False, help='Flag to write out the called genotype files as a binary plink output [Not yet implemented].')
output_parser.add_argument('-call_phase', action='store_true', required=False, help='Flag to call the phase as well as the genotypes.')
InputOutput.add_arguments_from_dictionary(output_parser, InputOutput.get_output_options(), options = ["writekey", "onlykeyed"])
# Multithreading
multithread_parser = parser.add_argument_group("Multithreading Options")
InputOutput.add_arguments_from_dictionary(multithread_parser, InputOutput.get_multithread_options(), options = ["iothreads", "maxthreads"])
peeling_parser = parser.add_argument_group("Optional peeling arguments")
peeling_parser.add_argument('-ncycles',default=5, required=False, type=int, help='Number of peeling cycles. Default: 5.')
peeling_parser.add_argument('-length', default=1.0, required=False, type=float, help='Estimated length of the chromosome in Morgans. [Default 1.00]')
peeling_parser.add_argument('-penetrance', default=None, required=False, type=str, nargs="*", help=argparse.SUPPRESS) #help='An optional external penetrance file. This will overwrite the default penetrance values.')
InputOutput.add_arguments_from_dictionary(peeling_parser, InputOutput.get_probability_options(), options = ["error", "seqerror"])
peeling_control_parser = parser.add_argument_group("Peeling control arguments")
peeling_control_parser.add_argument('-esterrors', action='store_true', required=False, help='Flag to re-estimate the genotyping error rates after each peeling cycle.')
peeling_control_parser.add_argument('-estmaf', action='store_true', required=False, help='Flag to re-estimate the minor allele frequency after each peeling cycle.')
    peeling_control_parser.add_argument('-nophasefounders', action='store_true', required=False, help='A flag to disable phasing of a heterozygous allele in one of the founders (if such an allele can be found).')
    peeling_control_parser.add_argument('-sexchrom', action='store_true', required=False, help='A flag to indicate that this is a sex chromosome. Sex needs to be given in the pedigree file. This is currently an experimental option.')
singleLocus_parser = parser.add_argument_group("Hybrid peeling arguments")
singleLocus_parser.add_argument('-mapfile',default=None, required=False, type=str, help='a map file for genotype data.')
singleLocus_parser.add_argument('-segmapfile',default=None, required=False, type=str, help='a map file for the segregation estimates for hybrid peeling.')
singleLocus_parser.add_argument('-segfile',default=None, required=False, type=str, help='A segregation file for hybrid peeling.')
# singleLocus_parser.add_argument('-blocksize',default=100, required=False, type=int, help='The number of markers to impute at once. This changes the memory requirements of the program.')
return InputOutput.parseArgs("AlphaPeel", parser)
def main() :
args = getArgs()
pedigree = Pedigree.Pedigree()
InputOutput.readInPedigreeFromInputs(pedigree, args)
singleLocusMode = args.runtype == "single"
if args.runtype == "multi" and args.segfile :
print("Running in multi-locus mode, external segfile ignored")
peelingInfo = PeelingInfo.createPeelingInfo(pedigree, args, phaseFounder = (not args.nophasefounders))
if singleLocusMode:
print("Generating seg estimates")
generateSingleLocusSegregation(peelingInfo, pedigree, args)
runPeelingCycles(pedigree, peelingInfo, args, singleLocusMode = singleLocusMode)
PeelingIO.writeGenotypes(pedigree, genoProbFunc = peelingInfo.getGenoProbs)
if not args.no_params: PeelingIO.writeOutParamaters(peelingInfo)
if not singleLocusMode and not args.no_seg: InputOutput.writeIdnIndexedMatrix(pedigree, peelingInfo.segregation, args.out + ".seg")
if __name__ == "__main__":
main()
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinypeel.py
|
tinypeel.py
|
import concurrent.futures
from numba import jit, float32, int8, int64, optional, boolean
from numba.experimental import jitclass
import numpy as np
from collections import OrderedDict
from ..tinyhouse import InputOutput
from ..tinyhouse import ProbMath
from ..tinyhouse import HaplotypeOperations
from . import PeelingInfo
import math
#########################################################################
### In this module we will update 3 things: ###
### 1) Our estimate for the MAF. ###
### 2) Our estimate of the locus specific (sequencing) error rate. ###
### 3) Our estimate of the locus specific recombination rate. ###
#########################################################################
# Estimating the minor allele frequency. This update is done by using an iterative approach
# which minimizes the likelihood of the observed genotypes conditional on them having been
# generated from hardy-weinberg equilibrium with a fixed maf value. To speed up, we use
# Newton style updates to re-estimate the minor allele frequency.
# There is math on how to do this... somewhere?
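# Sketch of the update implemented below (see addIndividualToUpdate): for an individual with data at a locus,
# let (d0, d1, d2) be its penetrance-based probabilities of carrying 0, 1 or 2 copies of the minor allele
# (d1 pools the two heterozygous states). Under Hardy-Weinberg equilibrium with frequency p, the individual's
# likelihood is f(p) = d0*(1-p)**2 + d1*p*(1-p) + d2*p**2, the log-likelihood derivatives accumulate as
# LLp += f'(p)/f(p) and LLpp += f''(p)/f(p) - (f'(p)/f(p))**2, and each Newton step is delta = -LLp/LLpp,
# with the estimate clamped to [0.01, 0.99] and at most five iterations (see newtonMafUpdates).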
def updateMaf(pedigree, peelingInfo):
if peelingInfo.isSexChrom:
print("Updating error rates and minor allele frequencies for sex chromosomes are not well test and will break in interesting ways. Recommend running without that option.")
maf = np.full(peelingInfo.nLoci, .5, dtype = np.float32)
for i in range(peelingInfo.nLoci):
maf[i] = newtonMafUpdates(peelingInfo, i)
mafGeno = ProbMath.getGenotypesFromMaf(maf)
for ind in pedigree:
if ind.isFounder():
peelingInfo.anterior[ind.idn,:,:] = mafGeno
peelingInfo.maf = maf.astype(np.float32)
def newtonMafUpdates(peelingInfo, index) :
# This function gives an iterative approximation for the minor allele frequency. It uses a maximum of 5 iterations.
maf = 0.5
maf_old = 0.5
iters = 5
converged = False
while not converged:
delta = getNewtonUpdate(maf_old, peelingInfo, index)
maf = maf_old + delta
if maf < 0.01:
maf = 0.01
if maf > .99:
maf = .99
        if abs(maf - maf_old) < 0.0001:
            converged = True
        maf_old = maf  # carry the updated estimate into the next Newton iteration
        iters -= 1
if iters < 0:
converged = True
return maf
@jit(nopython=True)
def getNewtonUpdate(p, peelingInfo, index) :
    # First and second derivatives of the log-likelihood.
    LLp = 0
    LLpp = 0
    # Add a weak prior: one pseudo-individual for each of the four genotype states.
LLp, LLpp = addIndividualToUpdate(np.array([1, 0, 0, 0], dtype = np.float32), p, LLp, LLpp)
LLp, LLpp = addIndividualToUpdate(np.array([0, 1, 0, 0], dtype = np.float32), p, LLp, LLpp)
LLp, LLpp = addIndividualToUpdate(np.array([0, 0, 1, 0], dtype = np.float32), p, LLp, LLpp)
LLp, LLpp = addIndividualToUpdate(np.array([0, 0, 0, 1], dtype = np.float32), p, LLp, LLpp)
for i in range(peelingInfo.nInd):
if peelingInfo.genotyped[i, index] :
d = peelingInfo.penetrance[i,:,index]
LLp, LLpp = addIndividualToUpdate(d, p, LLp, LLpp)
if LLp == 0 or LLpp == 0: return 0 #Could be a case where no one has data.
return -LLp/LLpp
@jit(nopython = True)
def addIndividualToUpdate(d, p, LLp, LLpp):
d0 = d[0]
d1 = d[1] + d[2]
d2 = d[3]
f = d0*(1-p)**2 + d1*p*(1-p) + d2*p**2
fp = (d1 - 2*d0) + 2*p*(d0 + d2 - d1)
fpp = 2*(d0 + d2 - d1)
LLp += fp/f
LLpp += fpp/f - (fp/f)**2
return LLp, LLpp
# Commenting out the following code. This was used to do updates via grid search.
# @jit(nopython = True)
# def mafLoglikelihood(peelingInfo, maf, index):
# score = 0
# maf_squared = maf**2
# maf_one_minus_maf = maf*(1-maf)
# one_minus_maf_squared = (1-maf)**2
# for i in range(peelingInfo.nInd):
# if peelingInfo.genotyped[i, index] :
# # if True :
# genoProbs = peelingInfo.penetrance[i,:,index]
# prob = 0
# prob += one_minus_maf_squared*genoProbs[0]
# prob += maf_one_minus_maf*genoProbs[1]
# prob += maf_one_minus_maf*genoProbs[2]
# prob += maf_squared*genoProbs[3]
# score += math.log(prob)
# return score
###
### NOTE: The following code updates the genotype and sequencing error rates.
###
def updatePenetrance(pedigree, peelingInfo):
peelingInfo.genoError = updateGenoError(pedigree, peelingInfo)
peelingInfo.seqError = updateSeqError(pedigree, peelingInfo)
if peelingInfo.isSexChrom:
print("Updating error rates and minor allele frequencies for sex chromosomes are not well test and will break in interesting ways. Recommend running without that option.")
for ind in pedigree:
sexChromFlag = peelingInfo.isSexChrom and ind.sex == 0 #This is the sex chromosome and the individual is male.
peelingInfo.penetrance[ind.idn,:,:] = ProbMath.getGenotypeProbabilities(peelingInfo.nLoci, ind.genotypes, ind.reads, peelingInfo.genoError, peelingInfo.seqError, sexChromFlag)
if ind.isGenotypedFounder() and (not InputOutput.args.nophasefounders) and ind.genotypes is not None:
loci = PeelingInfo.getHetMidpoint(ind.genotypes)
if loci is not None:
e = peelingInfo.genoError[loci]
peelingInfo.penetrance[ind.idn,:,loci] = np.array([e/3, e/3, 1-e, e/3], dtype = np.float32)
def updateGenoError(pedigree, peelingInfo) :
# The following is a simple EM update for the genotyping error rate at each locus.
    # This update adds the expected number of errors for each individual, marginalizing over their current estimate of their genotype probabilities.
    # We use a maximum value of 5% and a minimum value of 0.0001 to keep the estimates reasonable.
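    # Concretely: counts and errors start at 1 and 0.01 per locus (a weak prior), each individual adds its
    # expected error count given its current genotype probabilities, and the new rate is errors/counts,
    # clipped to the range [0.0001, 0.05].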
counts = np.full(pedigree.nLoci, 1, dtype = np.float32)
errors = np.full(pedigree.nLoci, 0.01, dtype = np.float32)
for ind in pedigree:
updateGenoError_ind(counts, errors, ind.genotypes, peelingInfo.getGenoProbs(ind.idn))
newError = errors/counts
newError = np.maximum(np.minimum(newError, .05), .0001)
return newError
@jit(nopython=True)
def updateGenoError_ind(counts, errors, genotypes, genoProbs):
for i in range(len(counts)) :
if genotypes[i] != 9: # Only include non-missing genotypes.
counts[i] += 1
if genotypes[i] == 0:
errors[i] += (genoProbs[1,i] + genoProbs[2, i] + genoProbs[3, i])
if genotypes[i] == 1:
errors[i] += (genoProbs[0, i] + genoProbs[3, i])
if genotypes[i] == 2:
errors[i] += (genoProbs[0, i] + genoProbs[1,i] + genoProbs[2, i])
def updateSeqError(pedigree, peelingInfo) :
    # The following is a simple EM update for the sequencing error rate at each locus.
    # This update adds the expected number of errors that an individual has, marginalizing over their current genotype probabilities.
    # This only uses the homozygous states; heterozygous states are ignored (in both the counts + errors terms).
    # We use a maximum value of 1% and a minimum value of 0.0001 to keep the estimates reasonable.
counts = np.full(pedigree.nLoci, 1, dtype = np.float32)
errors = np.full(pedigree.nLoci, 0.001, dtype = np.float32)
for ind in pedigree:
if ind.reads is not None:
updateSeqError_ind(counts, errors, ind.reads[0], ind.reads[1], peelingInfo.getGenoProbs(ind.idn))
newError = errors/counts
newError = np.maximum(np.minimum(newError, .01), .0001)
return newError
@jit(nopython=True)
def updateSeqError_ind(counts, errors, refReads, altReads, genoProbs):
# Errors occur when genotype is 0 and an alternative allele happens.
# Errors occur when genotype is 2 (coded as 3) and a reference allele happens.
# Number of observations is number of reads * probability the individual is homozygous.
for i in range(len(counts)) :
counts[i] += (genoProbs[0,i] + genoProbs[3,i]) * (altReads[i] + refReads[i])
errors[i] += genoProbs[0,i] * altReads[i]
errors[i] += genoProbs[3,i] * refReads[i]
###
### NOTE: The following code is designed to estimate the recombination rate between markers.
### This is currently broken and does not give good estimates. Strongly recommend not using it, and the associated options have been disabled in tinyPeel.py
###
def updateSeg(peelingInfo):
#Idea: Split the chromosome into ~10 blocks with slightly different starts/stops. Estimate at each one and then sum.
#Not sure if this is the best way, but probably will work okay.
distances = [estDistanceFromBreaks(0, peelingInfo.nLoci-1, nBreaks, peelingInfo) for nBreaks in [5, 10, 20]]
distance = np.mean(distances[0:2])
print(distances, ":", distance)
setupTransmission(distance, peelingInfo)
def estDistanceFromBreaks(loc1, loc2, nBreaks, peelingInfo):
breakPoints = np.floor(np.linspace(loc1, loc2, nBreaks)).astype(dtype = np.int64)
distances = [getDistance(breakPoints[i], breakPoints[i+1], peelingInfo) for i in range(nBreaks-1)]
print(nBreaks, ":", distances)
return sum(distances)
def getDistance(loc1, loc2, peelingInfo):
patSeg1 = getSumSeg(loc1, peelingInfo)
patSeg2 = getSumSeg(loc2, peelingInfo)
patValid = ((patSeg1 > .99) | (patSeg1 < .01)) & ((patSeg2 > .99) | (patSeg2 < .01))
# matValid = ((matSeg1 > .99) | (matSeg1 < .01)) & ((matSeg2 > .99) | (matSeg2 < .01))
# patRecomb = np.mean(np.abs(patSeg1[patValid] - patSeg2[patValid]))
# matRecomb = np.mean(np.abs(matSeg1[matValid] - matSeg2[matValid]))
# recomb = (patRecomb + matRecomb)/2
# difference = np.abs(np.round(patSeg1[patValid]) - np.round(patSeg2[patValid]))
# return np.mean(difference)
entropy1 = 1 - (-patSeg1*np.log2(patSeg1) - (1-patSeg1)*np.log2(1-patSeg1))
entropy2 = 1 - (-patSeg2*np.log2(patSeg2) - (1-patSeg2)*np.log2(1-patSeg2))
difference = patSeg1*(1-patSeg2) + (1-patSeg1)*patSeg2
est = np.sum(entropy1*entropy2*difference)/np.sum(entropy1*entropy2)
# return est
return haldane(est)
def getSumSeg(loc, peelingInfo):
seg = peelingInfo.segregation[:, :, loc]
sumSeg = np.sum(seg, 1)
patSeg = (seg[:,2] + seg[:,3])/sumSeg
# matSeg = (seg[:,1] + seg[:,3])/sumSeg
return patSeg
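# haldane() is the Haldane mapping function: it converts an estimated recombination fraction c into a
# genetic distance in Morgans, d = -ln(1 - 2c)/2.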
def haldane(difference) :
return -np.log(1.0-2.0*difference)/2.0
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/Peeling/PeelingUpdates.py
|
PeelingUpdates.py
|
import concurrent.futures
from numba import jit, float32, int8, int64, optional, boolean
from numba.experimental import jitclass
import numpy as np
from collections import OrderedDict
from ..tinyhouse import InputOutput
from ..tinyhouse import ProbMath
from ..tinyhouse import HaplotypeOperations
import math
# Defining variables for peel up and peel down. Ideally these would be characters, but numba does not support characters.
PEEL_UP = 0
PEEL_DOWN = 1
# This is the main peeling function.
@jit(nopython=True, nogil=True, locals={'e': float32, 'e4':float32, 'e16':float32, 'e1e':float32})
def peel(family, operation, peelingInfo, singleLocusMode) :
isSexChrom = peelingInfo.isSexChrom
e = .000001
e1e = 1-e
e4 = e/4
e16 = e/16
### Setup local variables from the peeling information container.
anterior = peelingInfo.anterior
penetrance = peelingInfo.penetrance
posterior = peelingInfo.posterior
segregation = peelingInfo.segregation
pointSeg = peelingInfo.pointSeg
segregationTensor = peelingInfo.segregationTensor
segregationTensor_norm = peelingInfo.segregationTensor_norm
nLoci = peelingInfo.nLoci
nOffspring = len(family.offspring)
sire = family.sire
dam = family.dam
fam = family.idn
#Creating variables here:
    # childToParents: The projection of each child onto the parents' genotypes.
    # childSegTensor: The child-specific segregation tensors (these are re-used, so they need to be stored).
    # allToParents: The combined projection of all children onto the parental genotypes.
    # parentsMinusChild: The estimate of the parents' genotypes minus the contribution from a specific child.
childToParents = np.full((nOffspring, 4, 4, nLoci), 0, dtype = np.float32)
childSegTensor = np.full((nOffspring, 4, 4, 4, nLoci), 0, dtype = np.float32)
allToParents = np.full((4, 4, nLoci), 1.0, dtype = np.float32)
parentsMinusChild = np.full((nOffspring, 4, 4, nLoci), 1, dtype = np.float32)
# Some local variables. currentSeg is the segregation estimate of a child (but may be modified).
currentSeg = np.full((4, nLoci), 1, dtype = np.float32)
#Construct the joint parent genotypes based on the parent's anterior, penetrance, and posterior terms minus this family.
probSire = anterior[sire,:,:]*penetrance[sire,:,:] * peelingInfo.posteriorSire_minusFam[fam,:,:]
probDam = anterior[dam,:,:]*penetrance[dam,:,:] * peelingInfo.posteriorDam_minusFam[fam,:,:]
probSire = probSire/np.sum(probSire, 0)
probDam = probDam/np.sum(probDam, 0)
    # Einstein sum notation 1: create the joint parental genotypes based on the probabilities for each parent.
# jointParents = np.einsum("ai, bi -> abi", probSire, probDam)
jointParents = getJointParents(probSire, probDam)
jointParents = jointParents/np.sum(np.sum(jointParents, axis = 0), axis = 0)
jointParents = (1-e)*jointParents + e/16 # There are 4x4 values for each locus in jointparents.
# There are 4 values for each locus. Normalization is done here so that jointParents is as accurate as possible.
# We need the posterior terms here for the peeling up step later.
probSire = probSire*e1e + e4
probDam = probDam*e1e + e4
# Now construct the parental genotypes based on within-family information.
for index in range(nOffspring):
child = family.offspring[index]
# The child's estimate is the combination of the posterior term and penetrance term for that child.
# We are estimating the parent's genotypes so the anterior term is ignored to avoid double counting.
childValues = posterior[child,:,:] * penetrance[child,:,:]
childValues = childValues/np.sum(childValues, axis = 0)
childValues = e1e*childValues + e4
# METHOD 1: Just use the current segregation of the child.
currentSeg[:,:] = segregation[child,:,:]
currentSeg /= np.sum(currentSeg, 0)
# METHOD 2: Use the segregation estimate of the child minus the contribution at a particular locus.
# Currently do not recommend using this.
# if not singleLocusMode :
# currentSeg[:,:] = segregation[child,:,:] / pointSeg[child,:,:]
# currentSeg /= np.sum(currentSeg, 0)
# else:
# currentSeg[:,:] = segregation[child,:,:]
if isSexChrom and peelingInfo.sex[child] == 0: #0 for male, 1 for female.
segregationTensor = peelingInfo.segregationTensorXY
if isSexChrom and peelingInfo.sex[child] == 1: #0 for male, 1 for female.
segregationTensor = peelingInfo.segregationTensorXX
        # Einstein sum notation 2: Create the child-specific segregation tensor using the child's current segregation estimate.
# childSegTensor[index,:,:,:,:] = np.einsum("abcd, di -> abci", segregationTensor, currentSeg)
createChildSegs(segregationTensor, currentSeg, childSegTensor[index,:,:,:,:])
        # Einstein sum notation 3: Estimate the parental genotypes based on the child's genotypes and their segregation tensor.
# childToParents[index,:,:,:] = np.einsum("abci, ci -> abi", childSegTensor[index,:,:,:,:], childValues)
projectChildGenotypes(childSegTensor[index,:,:,:,:], childValues, childToParents[index,:,:,:])
# Method 1: estimate the parents genotype and the child-specific posterior terms using iterative normalizing.
# for i in range(nOffspring) :
# parentsMinusChild[i,:,:,:] = jointParents[:,:,:]
# for i in range(nOffspring):
# allToParents *= childToParents[i,:,:,:]
# allToParents /= np.sum(np.sum(allToParents, axis = 0), axis=0)
# for j in range(nOffspring) :
# if i != j :
# parentsMinusChild[j,:,:,:] *= childToParents[i,:,:,:]
# parentsMinusChild[j,:,:,:] /= np.sum(np.sum(parentsMinusChild[i,:,:,:], axis = 0), axis=0)
##
# Method 2: estimate the parents genotype and the child-specific posterior terms using a log scale.
##
# for i in range(nOffspring) :
# parentsMinusChild[i,:,:,:] = np.log(jointParents[:,:,:])
# allToParents[:,:,:] = 0
# # #taking out post estimates.
# for i in range(nOffspring):
# log_childToParents = np.log(childToParents[i,:,:,:])
# allToParents += log_childToParents
# for j in range(nOffspring) :
# if i != j :
# parentsMinusChild[j,:,:,:] += log_childToParents
# Method 3: estimate the parents genotype and the child-specific posterior terms using a slightly smarter log scale.
for i in range(nOffspring) :
parentsMinusChild[i,:,:,:] = np.log(jointParents[:,:,:])
allToParents[:,:,:] = 0
for i in range(nOffspring):
log_childToParents = np.log(childToParents[i,:,:,:])
allToParents += log_childToParents
        parentsMinusChild[i,:,:,:] -= log_childToParents # This removes the estimate for an individual child from their parents' posterior term.
for i in range(nOffspring):
parentsMinusChild[i,:,:,:] += allToParents
# Move from a log-scale to a non-log scale and re-normalize.
allToParents = expNorm2D(allToParents)
for i in range(nOffspring):
parentsMinusChild[i,:,:,:] = expNorm2D(parentsMinusChild[i,:,:,:])
if operation == PEEL_DOWN:
for i in range(nOffspring):
child = family.offspring[i]
            # Einstein sum notation 4: Project the parent genotypes down onto the child genotypes.
# anterior[child,:,:] = np.einsum("abci, abi -> ci", childSegTensor[i,:,:,:,:], parentsMinusChild[i,:,:,:])
projectParentGenotypes(childSegTensor[i,:,:,:,:], parentsMinusChild[i,:,:,:], anterior[child,:,:])
anterior[child,:,:] /= np.sum(anterior[child,:,:], 0)
if operation == PEEL_UP :
# Take the allToParents estimate and combine to estimate the sire and dam's posterior estimates (for this family)
sirePosterior = combineAndReduceAxis1(allToParents, probDam)
sirePosterior /= np.sum(sirePosterior, axis = 0)
sirePosterior = sirePosterior*e1e + e4
peelingInfo.posteriorSire_new[fam,:,:] = sirePosterior
damPosterior = combineAndReduceAxis0(allToParents, probSire)
damPosterior /= np.sum(damPosterior, axis = 0)
damPosterior = damPosterior*e1e + e4
peelingInfo.posteriorDam_new[fam,:,:] = damPosterior
if (not singleLocusMode) and (operation == PEEL_DOWN):
# Estimate the segregation probabilities for each child.
for i in range(nOffspring):
# Child values is the same as in the posterior estimation step above.
child = family.offspring[i]
childValues = posterior[child,:,:] * penetrance[child,:,:]
childValues = childValues/np.sum(childValues, axis = 0)
childValues = e1e*childValues + e4
if isSexChrom and peelingInfo.sex[child] == 0: #0 for male, 1 for female.
segregationTensor = peelingInfo.segregationTensorXY
if isSexChrom and peelingInfo.sex[child] == 1: #0 for male, 1 for female.
segregationTensor = peelingInfo.segregationTensorXX
            # Einstein sum notation 5:
# pointSeg[child,:,:] = np.einsum("abcd, abi, ci-> di", segregationTensor, parentsMinusChild[i,:,:,:], childValues)
#Option 1: Estimate without normalizing.
# estimateSegregation(segregationTensor, parentsMinusChild[i,:,:,:], childValues, pointSeg[child,:,:])
#Option 2: Estimate with normalizing. I think this is what we want.
estimateSegregationWithNorm(segregationTensor, segregationTensor_norm, parentsMinusChild[i,:,:,:], childValues, pointSeg[child,:,:])
segregation[child,:,:] = (1-e)*collapsePointSeg(pointSeg[child,:,:], peelingInfo.transmissionRate) + e/4
#####
##### The following are a large number of "helper" jit functions that replace the Einstein sums in the original scripts.
#####
@jit(nopython=True, nogil=True)
def getJointParents(probSire, probDam):
# jointParents = np.einsum("ai, bi -> abi", probSire, probDam)
nLoci = probSire.shape[1]
output = np.full(shape = (4, 4, nLoci), fill_value = 0, dtype = np.float32)
for a in range(4) :
for b in range(4) :
for i in range(nLoci):
output[a, b, i] = probSire[a,i] * probDam[b,i]
return output
@jit(nopython=True, nogil=True)
def createChildSegs(segregationTensor, currentSeg, output):
# childSegs[index,:,:,:,:] = np.einsum("abcd, di -> abci", segregationTensor, currentSeg)
nLoci = currentSeg.shape[1]
output[:,:,:,:] = 0
for a in range(4) :
for b in range(4) :
for c in range(4) :
for d in range(4) :
for i in range(nLoci):
output[a, b, c, i] += segregationTensor[a, b, c, d]*currentSeg[d,i]
return output
@jit(nopython=True, nogil=True)
def projectChildGenotypes(childSegs, childValues, output):
# childToParents[index,:,:,:] = np.einsum("abci, ci -> abi", childSegs[index,:,:,:,:], childValues)
nLoci = childSegs.shape[3]
output[:,:,:] = 0
for a in range(4) :
for b in range(4) :
for c in range(4) :
for i in range(nLoci):
output[a, b, i] += childSegs[a, b, c, i]*childValues[c,i]
return output
@jit(nopython=True, nogil=True)
def projectParentGenotypes(childSegs, parentValues, output):
# anterior[child,:,:] = np.einsum("abci, abi -> ci", childSegs[i,:,:,:,:], parentsMinusChild[i,:,:,:])
nLoci = childSegs.shape[3]
output[:,:]=0
for a in range(4) :
for b in range(4) :
for c in range(4) :
for i in range(nLoci):
output[c, i] += childSegs[a,b,c,i]*parentValues[a,b,i]
return output
@jit(nopython=True, nogil=True)
def estimateSegregation(segregationTensor, parentValues, childValues, output):
# pointSeg[child,:,:] = np.einsum("abcd, abi, ci-> di", segregationTensor, parentsMinusChild[i,:,:,:], childValues)
nLoci = childValues.shape[1]
output[:,:]=0
for a in range(4) :
for b in range(4) :
for c in range(4) :
for d in range(4) :
for i in range(nLoci):
output[d, i] += segregationTensor[a,b,c,d]*parentValues[a,b,i]*childValues[c,i]
return output
@jit(nopython=True, nogil=True)
def estimateSegregationWithNorm(segregationTensor, segregationTensor_norm, parentValues, childValues, output):
# pointSeg[child,:,:] = np.einsum("abcd, abi, ci-> di", segregationTensor, parentsMinusChild[i,:,:,:], childValues)
nLoci = childValues.shape[1]
output[:,:]=0
for a in range(4) :
for b in range(4) :
for c in range(4) :
for d in range(4) :
for i in range(nLoci):
#Check if norm is 0. Otherwise use norm to normalize.
if segregationTensor_norm[a, b, c] != 0:
output[d, i] += segregationTensor[a,b,c,d]*parentValues[a,b,i]*childValues[c,i]/segregationTensor_norm[a, b, c]
return output
@jit(nopython=True, nogil=True)
def combineAndReduceAxis1(jointEstimate, parentEstimate):
# output = np.einsum("abi, bi-> ai", jointEstimate, parentEstimate)
nLoci = parentEstimate.shape[1]
output = np.full((4, nLoci), 0, dtype = np.float32)
for a in range(4):
for b in range(4) :
for i in range(nLoci):
output[a, i] += jointEstimate[a,b,i]*parentEstimate[b, i]
return output
@jit(nopython=True, nogil=True)
def combineAndReduceAxis0(jointEstimate, parentEstimate):
# output = np.einsum("abi, ai-> bi", jointEstimate, parentEstimate)
nLoci = parentEstimate.shape[1]
output = np.full((4, nLoci), 0, dtype = np.float32)
for a in range(4):
for b in range(4) :
for i in range(nLoci):
output[b, i] += jointEstimate[a,b,i]*parentEstimate[a, i]
return output
@jit(nopython=True, nogil=True)
def expNorm2D(mat):
# Matrix is 4x4xnLoci: Output is to take the exponential of the matrix and normalize each locus. We need to make sure that there are not any overflow values.
nLoci = mat.shape[2]
for i in range(nLoci):
maxVal = 1 # Log of anything between 0-1 will be less than 0. Using 1 as a default.
for a in range(4):
for b in range(4):
if mat[a, b, i] > maxVal or maxVal == 1:
maxVal = mat[a, b, i]
for a in range(4):
for b in range(4):
mat[a, b, i] -= maxVal
# Normalize.
tmp = np.exp(mat)
for i in range(nLoci):
total = 0
for a in range(4):
for b in range(4):
total += tmp[a, b, i]
for a in range(4):
for b in range(4):
tmp[a, b, i] /= total
return tmp
@jit(nopython=True, nogil=True)
def expNorm1D(mat):
    # Matrix is 4 x nLoci: take the exponential of the matrix and normalize each locus, making sure there are no overflow values.
nLoci = mat.shape[1]
for i in range(nLoci):
maxVal = 1 # Log of anything between 0-1 will be less than 0. Using 1 as a default.
for a in range(4):
if mat[a, i] > maxVal or maxVal == 1:
maxVal = mat[a, i]
for a in range(4):
mat[a, i] -= maxVal
tmp = np.exp(mat)
for i in range(nLoci):
total = 0
for a in range(4):
total += tmp[a,i]
for a in range(4):
tmp[a, i] /= total
return tmp
@jit(nopython=True, nogil=True, locals={'e': float32, 'e2':float32, 'e1e':float32, 'e2i':float32})
def collapsePointSeg(pointSeg, transmission):
# This is the forward backward algorithm.
# Segregation estimate state ordering: pp, pm, mp, mm
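    # Transition weights between adjacent loci, where e is the transmission rate for the interval:
    # e2i = (1-e)**2 -> neither the paternal nor the maternal segregation indicator switches,
    # e1e = e*(1-e)  -> exactly one of the two indicators switches,
    # e2  = e**2     -> both indicators switch (e.g. pp at the previous locus contributes e2 to mm here).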
nLoci = pointSeg.shape[1]
seg = np.full(pointSeg.shape, .25, dtype = np.float32)
for i in range(nLoci):
for j in range(4):
seg[j,i] = pointSeg[j,i]
tmp = np.full((4), 0, dtype = np.float32)
new = np.full((4), 0, dtype = np.float32)
prev = np.full((4), .25, dtype = np.float32)
for i in range(1, nLoci):
e = transmission[i-1]
e2 = e**2
e1e = e*(1-e)
e2i = (1.0-e)**2
for j in range(4):
tmp[j] = prev[j]*pointSeg[j,i-1]
sum_j = 0
for j in range(4):
sum_j += tmp[j]
for j in range(4):
tmp[j] = tmp[j]/sum_j
# ! fm fm fm fm
# !segregationOrder: pp, pm, mp, mm
new[0] = e2*tmp[3] + e1e*(tmp[1] + tmp[2]) + e2i*tmp[0]
new[1] = e2*tmp[2] + e1e*(tmp[0] + tmp[3]) + e2i*tmp[1]
new[2] = e2*tmp[1] + e1e*(tmp[0] + tmp[3]) + e2i*tmp[2]
new[3] = e2*tmp[0] + e1e*(tmp[1] + tmp[2]) + e2i*tmp[3]
# tmp = tmp/np.sum(tmp)
# new = e2i*tmp + e2 + e1e*(tmp[0] + tmp[3])*same + e1e*(tmp[1] + tmp[2])*diff
for j in range(4):
seg[j,i] *= new[j]
# seg[:,i] *= new
prev = new
prev = np.full((4), .25, dtype = np.float32)
for i in range(nLoci-2, -1, -1): #zero indexed then minus one since we skip the boundary.
e = transmission[i]
e2 = e**2
e1e = e*(1-e)
e2i = (1.0-e)**2
for j in range(4):
tmp[j] = prev[j]*pointSeg[j,i+1]
sum_j = 0
for j in range(4):
sum_j += tmp[j]
for j in range(4):
tmp[j] = tmp[j]/sum_j
new[0] = e2*tmp[3] + e1e*(tmp[1] + tmp[2]) + e2i*tmp[0]
new[1] = e2*tmp[2] + e1e*(tmp[0] + tmp[3]) + e2i*tmp[1]
new[2] = e2*tmp[1] + e1e*(tmp[0] + tmp[3]) + e2i*tmp[2]
new[3] = e2*tmp[0] + e1e*(tmp[1] + tmp[2]) + e2i*tmp[3]
for j in range(4):
seg[j,i] *= new[j]
prev = new
for i in range(nLoci):
sum_j = 0
for j in range(4):
sum_j += seg[j, i]
for j in range(4):
seg[j, i] = seg[j, i]/sum_j
# seg = seg/np.sum(seg, 0)
return(seg)
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/Peeling/Peeling.py
|
Peeling.py
|
import concurrent.futures
from numba import jit, float32, int8, int64, optional, boolean
from numba.experimental import jitclass
import numpy as np
from collections import OrderedDict
from ..tinyhouse import InputOutput
from ..tinyhouse import ProbMath
from ..tinyhouse import HaplotypeOperations
import math
#########################################################################
### In this module we define the peeling info object. ###
### This is a just in time container for the various ###
### peeling probability calculations. ###
#########################################################################
def createPeelingInfo(pedigree, args, createSeg=True, phaseFounder = False) :
# NOTE: createSeg is added as an option to decrease memory usage during the single locus peeling steps.
nLoci = pedigree.nLoci
peelingInfo = jit_peelingInformation(nInd=pedigree.maxIdn, nFam=pedigree.maxFam, nLoci=nLoci, createSeg=createSeg)
peelingInfo.isSexChrom = args.sexchrom
# Information about the peeling positions are handled elsewhere.
peelingInfo.positions = None
#Generate the segregation tensors.
peelingInfo.segregationTensor = ProbMath.generateSegregation(e = 1e-06)
peelingInfo.segregationTensor_norm = ProbMath.generateSegregation(e = 1e-06, partial=True) #Partial gives the normalizing constant.
peelingInfo.segregationTensorXY = ProbMath.generateSegregationXYChrom(e = 1e-06)
peelingInfo.segregationTensorXX = ProbMath.generateSegregationXXChrom(e = 1e-06)
peelingInfo.genoError[:] = args.error
peelingInfo.seqError[:] = args.seqerror
setupTransmission(args.length, peelingInfo) #Sets up the transmission rates using a custom position list and a total chromosome length.
for ind in pedigree:
peelingInfo.sex[ind.idn] = ind.sex
if ind.genotypes is not None and ind.haplotypes is not None:
HaplotypeOperations.ind_fillInGenotypesFromPhase(ind)
sexChromFlag = peelingInfo.isSexChrom and ind.sex == 0 #This is the sex chromosome and the individual is male.
peelingInfo.penetrance[ind.idn,:,:] = ProbMath.getGenotypeProbabilities(peelingInfo.nLoci, ind.genotypes, ind.reads, peelingInfo.genoError, peelingInfo.seqError, sexChromFlag)
# Set the genotyping/read status for each individual. This will be used for, e.g., estimating the minor allele frequency.
if ind.genotypes is not None:
setGenotypeStatusGenotypes(ind.idn, ind.genotypes, peelingInfo)
if ind.reads is not None:
setGenotypeStatusReads(ind.idn, ind.reads[0], ind.reads[1], peelingInfo)
if ind.isGenotypedFounder() and phaseFounder and ind.genotypes is not None:
loci = getHetMidpoint(ind.genotypes)
if loci is not None:
e = args.error
peelingInfo.penetrance[ind.idn,:,loci] = np.array([e/3, e/3, 1-e, e/3], dtype = np.float32)
if args.penetrance is not None:
if args.sexchrom:
print("Using an external penetrance file and the sexchrom option is highly discouraged. Please do not use.")
if args.esterrors :
print("External penetrance file included, but esterrors flag used. The two options are incompatible. esterrors set to false.")
args.esterrors = False
for pen in args.penetrance:
addPenetranceFromExternalFile(pedigree, peelingInfo, pen, args)
# updateMaf(pedigree, peelingInfo)
return peelingInfo
def setupTransmission(length, peelingInfo) :
if peelingInfo.positions is None:
localMap = np.linspace(0, 1, num = peelingInfo.nLoci, dtype = np.float32)
else:
localMap = peelingInfo.positions/peelingInfo.positions[-1] #This should be sorted. Need to add in code to check.
for i in range(peelingInfo.nLoci -1):
distance = localMap[i+1] - localMap[i]
distance = distance * length
peelingInfo.transmissionRate[i] = distance
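# Note: transmissionRate[i] holds the genetic distance (in Morgans) between loci i and i+1 and is used
# directly as the per-interval switch probability in Peeling.collapsePointSeg, which is a close
# approximation when the intervals are short.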
@jit(nopython=True)
def setGenotypeStatusGenotypes(idn, genotypes, peelingInfo):
nLoci = len(genotypes)
if genotypes is not None:
for i in range(nLoci):
peelingInfo.genotyped[idn, i] = peelingInfo.genotyped[idn, i] or genotypes[i] != 9
@jit(nopython=True)
def setGenotypeStatusReads(idn, reads0, reads1, peelingInfo):
nLoci = len(reads0)
if reads0 is not None and reads1 is not None:
for i in range(nLoci):
peelingInfo.genotyped[idn, i] = peelingInfo.genotyped[idn, i] or reads0[i] != 0 or reads1[i] != 0
def addPenetranceFromExternalFile(pedigree, peelingInfo, fileName, args):
    # This function allows external penetrance files to be read in and combined with the genotype probabilities for an individual.
print("Reading in penetrance file:", fileName)
with open(fileName) as f:
e = 0
for line in f:
parts = line.split();
idx = parts[0];
parts = parts[1:]
if args.startsnp is not None :
parts = parts[args.startsnp : args.stopsnp+1] #Offset 1 to include stopsnp
penetranceLine=np.array([float(val) for val in parts], dtype = np.float32)
if idx not in pedigree.individuals:
print("Individual", idx, "not found in pedigree. Individual ignored.")
else:
ind = pedigree.individuals[idx]
peelingInfo.penetrance[ind.idn,e,:] *= penetranceLine
# Normalizing in terms of SNPs seems like a really bad idea.
# peelingInfo.penetrance[ind.idn,e,:] /= np.sum(peelingInfo.penetrance[ind.idn,e,:], 0) # Normalization added, just in case.
e = (e+1)%4
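# getHetMidpoint searches outwards from the middle of the chromosome for the nearest heterozygous marker
# in an individual's genotypes and returns its index, or None if no heterozygous marker exists. It is used
# in createPeelingInfo above to arbitrarily phase one heterozygous locus in a genotyped founder.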
@jit(nopython=True)
def getHetMidpoint(geno):
nLoci = len(geno)
midpoint = int(nLoci/2)
index = 0
changed = False
while index < nLoci/2:
if midpoint + index < nLoci:
if geno[midpoint + index] == 1:
return midpoint + index
if midpoint - index >= 0:
if geno[midpoint - index] == 1:
return midpoint - index
index += 1
return None
spec = OrderedDict()
spec['nInd'] = int64
spec['nFam'] = int64
spec['nLoci'] = int64
spec['isSexChrom'] = boolean
spec['sex'] = int64[:]
spec['genotyped'] = boolean[:,:] #Maybe this should be removed?
# Individual terms: Each will be nInd x 4 x nLoci
spec['anterior'] = float32[:,:,:]
spec['posterior'] = float32[:,:,:]
spec['penetrance'] = float32[:,:,:]
spec['segregation'] = optional(float32[:,:,:])
spec['pointSeg'] = optional(float32[:,:,:]) # I think we don't use this any more. Potentially could be dropped.
# Family terms. Each will be nFam x 4 x nLoci
spec['posteriorSire_minusFam'] = float32[:,:,:]
spec['posteriorDam_minusFam'] = float32[:,:,:]
spec['posteriorSire_new'] = float32[:,:,:]
spec['posteriorDam_new'] = float32[:,:,:]
# Segregation tensors. Each of these will be either 4x4x4x4 or 4x4x4
spec['segregationTensor'] = optional(float32[:,:,:,:])
spec['segregationTensor_norm'] = optional(float32[:,:,:]) # Note: This one is a bit smaller.
spec['segregationTensorXX'] = optional(float32[:,:,:,:])
spec['segregationTensorXY'] = optional(float32[:,:,:,:])
# Marker specific rates:
spec['genoError'] = optional(float32[:])
spec['seqError'] = optional(float32[:])
spec['transmissionRate'] = optional(float32[:])
spec['maf'] = optional(float32[:])
spec['positions'] = optional(float32[:]) # Not sure we use this.
spec['iteration'] = int64
@jitclass(spec)
class jit_peelingInformation(object):
def __init__(self, nInd, nFam, nLoci, createSeg=True):
self.iteration = 0
self.nInd = nInd
self.nFam = nFam
self.nLoci = nLoci
self.isSexChrom = False
self.construct(createSeg)
# These are filled in from createPeelingInfo, above.
self.positions = None
self.segregationTensor = None
self.segregationTensor_norm = None
def construct(self, createSeg = True) :
baseValue = .25
self.sex = np.full(self.nInd, 0, dtype = np.int64)
self.genotyped = np.full((self.nInd, self.nLoci), False, dtype = np.bool_)
self.anterior = np.full((self.nInd, 4, self.nLoci), baseValue, dtype = np.float32)
self.posterior = np.full((self.nInd, 4, self.nLoci), baseValue, dtype = np.float32)
self.penetrance = np.full((self.nInd, 4, self.nLoci), baseValue, dtype = np.float32)
self.segregation = np.full((self.nInd, 4, self.nLoci), baseValue, dtype = np.float32)
        if createSeg: # When createSeg is False, only the pointSeg term is omitted; it is not used for single locus peeling.
self.pointSeg = np.full((self.nInd, 4, self.nLoci), baseValue, dtype = np.float32)
else:
self.pointSeg = None
self.posteriorSire_minusFam = np.full((self.nFam, 4, self.nLoci), baseValue, dtype = np.float32)
self.posteriorDam_minusFam = np.full((self.nFam, 4, self.nLoci), baseValue, dtype = np.float32)
self.posteriorSire_new = np.full((self.nFam, 4, self.nLoci), baseValue, dtype = np.float32)
self.posteriorDam_new = np.full((self.nFam, 4, self.nLoci), baseValue, dtype = np.float32)
self.genoError = np.full((self.nLoci), 0, dtype = np.float32)
self.seqError = np.full((self.nLoci), 0, dtype = np.float32)
self.maf = np.full((self.nLoci), .5, dtype = np.float32)
self.transmissionRate = np.full((self.nLoci-1), 0, dtype = np.float32)
def getGenoProbs(self, idn):
genoProbs = self.anterior[idn,:,:]*self.posterior[idn,:,:]*self.penetrance[idn,:,:]
genoProbs = genoProbs/np.sum(genoProbs,0)
return genoProbs
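# Minimal illustrative sketch (hypothetical 4 x 3 arrays) of what getGenoProbs
# computes: the anterior (from ancestors), posterior (from descendants) and
# penetrance (from the individual's own data) terms are multiplied element-wise
# and each locus column is normalized to sum to one over the four genotype states.
def _example_geno_probs():
    anterior = np.full((4, 3), 0.25, dtype=np.float32)
    posterior = np.full((4, 3), 0.25, dtype=np.float32)
    penetrance = np.array([[0.70, 0.25, 0.10],
                           [0.10, 0.25, 0.10],
                           [0.10, 0.25, 0.10],
                           [0.10, 0.25, 0.70]], dtype=np.float32)
    genoProbs = anterior * posterior * penetrance
    genoProbs = genoProbs / np.sum(genoProbs, 0)
    return genoProbs  # columns sum to one; loci 0 and 2 favour the homozygous states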
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/Peeling/PeelingInfo.py
|
PeelingInfo.py
|
import numpy as np
from numba import jit
from ..tinyhouse import InputOutput
def readInSeg(pedigree, fileName, start=None, stop = None):
print("Reading in seg file:", fileName)
if start is None: start = 0
if stop is None: stop = pedigree.nLoci
nLoci = stop - start + 1 #Contains stop.
seg = np.full((pedigree.maxIdn, 4, nLoci), .25, dtype = np.float32)
index = 0
fileNColumns = 0
indHit = np.full(pedigree.maxIdn, 0, dtype = np.int64)
with open(fileName) as f:
e = 0
currentInd = None
for line in f:
parts = line.split();
idx = parts[0];
if fileNColumns == 0:
fileNColumns = len(parts)
if fileNColumns != len(parts):
raise ValueError(f"The length of the line is not the expected length. Expected {fileNColumns} got {len(parts)} on individual {idx} and line {e}.")
segLine=np.array([float(val) for val in parts[(start+1):(stop+2)]], dtype = np.float32)
if len(segLine) != nLoci:
raise ValueError(f"The length of the line subsection is not the expected length. Expected {nLoci} got {len(segLine)} on individual {idx} and line {e}.")
if idx not in pedigree.individuals:
print(f"Individual {idx} not found in pedigree. Individual ignored.")
else:
ind = pedigree.individuals[idx]
if e == 0:
currentInd = ind.idx
if currentInd != ind.idx:
raise ValueError(f"Unexpected individual. Expecting individual {currentInd}, but got ind {ind.idx} on value {e}")
seg[ind.idn,e,:] = segLine
e = (e+1)%4
ind.fileIndex['segregation'] = index; index += 1
indHit[ind.idn] += 1
for ind in pedigree:
if indHit[ind.idn] != 4:
print(f"No segregation information found for individual {ind.idx}")
return seg
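# Minimal illustrative sketch (hypothetical ids and values) of the segregation
# file layout readInSeg expects: four consecutive rows per individual (one per
# segregation state), with the individual's id in the first column and one
# value per locus after it, e.g.
#
#   id1 0.97 0.95 0.90
#   id1 0.01 0.02 0.04
#   id1 0.01 0.02 0.03
#   id1 0.01 0.01 0.03
#
# With start/stop set, only the slice parts[(start+1):(stop+2)] of each row is kept.
def _example_seg_slice():
    parts = "id1 0.97 0.95 0.90".split()
    start, stop = 0, 1
    values = [float(val) for val in parts[(start + 1):(stop + 2)]]
    assert values == [0.97, 0.95]  # the first two loci, stop included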
def writeOutParamaters(peelingInfo) :
args = InputOutput.args
np.savetxt(args.out + ".genoError", peelingInfo.genoError, fmt = "%f")
np.savetxt(args.out + ".seqError", peelingInfo.seqError, fmt = "%f")
# np.savetxt(args.out + ".trans", peelingInfo.transmissionRate, fmt = "%f")
np.savetxt(args.out + ".maf", peelingInfo.maf, fmt = "%f")
def writeGenotypes(pedigree, genoProbFunc) :
args = InputOutput.args
if not args.no_dosages: writeDosages(pedigree, genoProbFunc, args.out + ".dosages")
if args.haps: writeGenoProbs(pedigree, genoProbFunc, args.out + ".haps")
if args.calling_threshold is not None:
for thresh in args.calling_threshold:
if args.binary_call_files : writeBinaryCalledGenotypes(pedigree, genoProbFunc, args.out + ".called." + str(thresh), thresh)
if not args.binary_call_files : writeCalledGenotypes(pedigree, genoProbFunc, args.out + ".called." + str(thresh), thresh)
if args.call_phase:
writeCalledPhase(pedigree, genoProbFunc, args.out + ".called_phase." + str(thresh), thresh)
def writeGenoProbs(pedigree, genoProbFunc, outputFile):
with open(outputFile, 'w+') as f:
for idx, ind in pedigree.writeOrder():
matrix = genoProbFunc(ind.idn)
f.write('\n')
for i in range(matrix.shape[0]) :
f.write(ind.idx + ' ' + ' '.join(map("{:.4f}".format, matrix[i,:])) + '\n')
def writeDosages(pedigree, genoProbFunc, outputFile):
with open(outputFile, 'w+') as f:
for idx, ind in pedigree.writeOrder():
matrix = np.dot(np.array([0,1,1,2]), genoProbFunc(ind.idn))
if InputOutput.args.sexchrom and ind.sex == 0:
matrix *= 2
f.write(ind.idx + ' ' + ' '.join(map("{:.4f}".format, matrix)) + '\n')
def writeCalledGenotypes(pedigree, genoProbFunc, outputFile, thresh):
with open(outputFile, 'w+') as f:
for idx, ind in pedigree.writeOrder():
matrix = genoProbFunc(ind.idn)
matrixCollapsedHets = np.array([matrix[0,:], matrix[1,:] + matrix[2,:], matrix[3,:]], dtype=np.float32)
calledGenotypes = np.argmax(matrixCollapsedHets, axis = 0)
setMissing(calledGenotypes, matrixCollapsedHets, thresh)
if InputOutput.args.sexchrom and ind.sex == 0:
doubleIfNotMissing(calledGenotypes)
f.write(ind.idx + ' ' + ' '.join(map(str, calledGenotypes)) + '\n')
def writeCalledPhase(pedigree, genoProbFunc, outputFile, thresh):
with open(outputFile, 'w+') as f:
for idx, ind in pedigree.writeOrder():
matrix = genoProbFunc(ind.idn)
# Paternal
paternal_probs = np.array([matrix[0,:] + matrix[1,:], matrix[2,:] + matrix[3,:]], dtype=np.float32)
paternal_haplotype = np.argmax(paternal_probs, axis = 0)
setMissing(paternal_haplotype, paternal_probs, thresh)
f.write(ind.idx + ' ' + ' '.join(map(str, paternal_haplotype)) + '\n')
#Maternal
maternal_probs = np.array([matrix[0,:] + matrix[2,:], matrix[1,:] + matrix[3,:]], dtype=np.float32)
maternal_haplotype = np.argmax(maternal_probs, axis = 0)
setMissing(maternal_haplotype, maternal_probs, thresh)
f.write(ind.idx + ' ' + ' '.join(map(str, maternal_haplotype)) + '\n')
def writeBinaryCalledGenotypes(pedigree, genoProbFunc, outputFile, thresh):
for idx, ind in pedigree.writeOrder():
matrix = genoProbFunc(ind.idn)
matrixCollapsedHets = np.array([matrix[0,:], matrix[1,:] + matrix[2,:], matrix[3,:]], dtype=np.float32)
calledGenotypes = np.argmax(matrixCollapsedHets, axis = 0)
setMissing(calledGenotypes, matrixCollapsedHets, thresh)
if InputOutput.args.sexchrom and ind.sex == 0:
doubleIfNotMissing(calledGenotypes)
ind.genotypes = calledGenotypes.astype(np.int8)
InputOutput.writeOutGenotypesPlink(pedigree, outputFile)
@jit(nopython=True)
def doubleIfNotMissing(calledGenotypes):
nLoci = len(calledGenotypes)
for i in range(nLoci):
if calledGenotypes[i] == 1:
calledGenotypes[i] = 2
@jit(nopython=True)
def setMissing(calledGenotypes, matrix, thresh) :
nLoci = len(calledGenotypes)
for i in range(nLoci):
if matrix[calledGenotypes[i],i] < thresh:
calledGenotypes[i] = 9
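# Minimal illustrative sketch (hypothetical probabilities) of the calling steps
# used above: the four genotype states are collapsed to 0/1/2 copies of the
# alternative allele, the most probable class is taken, and calls whose
# probability falls below the threshold are set to missing (9).
def _example_calling():
    matrix = np.array([[0.80, 0.10, 0.30],
                       [0.05, 0.40, 0.25],
                       [0.05, 0.40, 0.25],
                       [0.10, 0.10, 0.20]], dtype=np.float32)
    collapsed = np.array([matrix[0, :], matrix[1, :] + matrix[2, :], matrix[3, :]], dtype=np.float32)
    called = np.argmax(collapsed, axis=0)
    setMissing(called, collapsed, 0.7)
    assert list(called) == [0, 1, 9]  # the last locus is too uncertain to call
    return called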
def fullOutput(pedigree, peelingInfo, args):
InputOutput.writeIdnIndexedMatrix(pedigree, peelingInfo.penetrance, args.out + ".penetrance")
InputOutput.writeIdnIndexedMatrix(pedigree, peelingInfo.anterior, args.out + ".anterior")
InputOutput.writeIdnIndexedMatrix(pedigree, peelingInfo.posterior, args.out + ".posterior")
InputOutput.writeFamIndexedMatrix(pedigree, peelingInfo.posteriorSire_minusFam, args.out + ".posteriorSire_minusFam")
InputOutput.writeFamIndexedMatrix(pedigree, peelingInfo.posteriorDam_minusFam, args.out + ".posteriorDam_minusFam")
InputOutput.writeFamIndexedMatrix(pedigree, peelingInfo.posteriorSire_new, args.out + ".posteriorSire_new")
InputOutput.writeFamIndexedMatrix(pedigree, peelingInfo.posteriorDam_new, args.out + ".posteriorDam_new")
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/Peeling/PeelingIO.py
|
PeelingIO.py
|
import numpy as np
import numba
import sys
from numba import jit, int8, int64, boolean, deferred_type, optional, float32, double
from numba.experimental import jitclass
from collections import OrderedDict
from . import InputOutput
from . import ProbMath
from . import MultiThreadIO
class Family(object):
"""Family is a container for fullsib families"""
def __init__(self, idn, sire, dam, offspring):
self.idn = idn
self.sire = sire
self.dam = dam
self.offspring = offspring
self.generation = max(sire.generation, dam.generation) + 1
        # Add this family to both the sire's and the dam's list of families.
self.sire.families.append(self)
self.dam.families.append(self)
def addChild(self, child) :
self.offspring.append(child)
def toJit(self):
"""Returns a just in time version of itself with Individuals replaced by id numbers"""
offspring = np.array([child.idn for child in self.offspring])
return jit_Family(self.idn, self.sire.idn, self.dam.idn, offspring)
spec = OrderedDict()
spec['idn'] = int64
spec['sire'] = int64
spec['dam'] = int64
spec['offspring'] = int64[:]
@jitclass(spec)
class jit_Family(object):
def __init__(self, idn, sire, dam, offspring):
self.idn = idn
self.sire = sire
self.dam = dam
self.offspring = offspring.astype(np.int64)
class Individual(object):
def __init__(self, idx, idn) :
self.genotypes = None
self.haplotypes = None
self.dosages = None
self.imputed_genotypes = None
self.imputed_haplotypes = None
self.reads = None
self.longReads = []
# Do we use this?
self.genotypeDosages = None
self.haplotypeDosages = None
self.hapOfOrigin = None
        # Info allows other software to attach additional information to an Individual.
self.info = None
#For plant impute. Inbred is either DH or heavily selfed. Ancestors is historical source of the cross (may be more than 2 way so can't handle via pedigree).
self.inbred = False
self.imputationAncestors = [] #This is a list of lists. Either length 0, length 1 or length 2.
self.selfingGeneration = None
self.sire = None
self.dam = None
        self.idx = idx # User-supplied string identifier
self.idn = idn # ID number assigned by the pedigree
self.fileIndex = dict() # Position of an animal in each file when reading in. Used to make sure Input and Output order are the same.
self.fileIndex["id"] = idx
self.dummy = False
self.offspring = []
self.families = []
self.sex = -1
self.generation = None
self.initHD = False
# used in pythonHMM, but how should this best be coded when already have initHD? Should probably be set when data is read in,
# but need high_density_threshold to be set in the pedigree first
self.is_high_density = False
self.genotypedFounderStatus = None #?
def __eq__(self, other):
return self is other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.idn)
def subset(self, start, stop):
# Take an individual and create an individual that just contains information on those markers.
# It's okay if this wipes other information.
# print(self.__class__)
new_ind = self.__class__(self.idx, self.idn)
if self.genotypes is not None:
new_ind.genotypes = self.genotypes[start:stop].copy() # Maybe could get away with not doing copies... doing them just to be safe.
if self.haplotypes is not None:
new_ind.haplotypes = (self.haplotypes[0][start:stop].copy(), self.haplotypes[1][start:stop].copy())
if self.reads is not None:
new_ind.reads = (self.reads[0][start:stop].copy(), self.reads[1][start:stop].copy())
return new_ind
def getPercentMissing(self):
return np.mean(self.genotypes == 9)
def getGeneration(self):
if self.generation is not None : return self.generation
if self.dam is None:
damGen = -1
else:
damGen = self.dam.getGeneration()
if self.sire is None:
sireGen = -1
else:
sireGen = self.sire.getGeneration()
self.generation = max(sireGen, damGen) + 1
return self.generation
def constructInfo(self, nLoci, genotypes = True, haps = False, reads = False) :
if genotypes and self.genotypes is None:
self.genotypes = np.full(nLoci, 9, dtype = np.int8)
if haps and self.haplotypes is None:
self.haplotypes = (np.full(nLoci, 9, dtype = np.int8), np.full(nLoci, 9, dtype = np.int8))
if reads and self.reads is None:
self.reads = (np.full(nLoci, 0, dtype = np.int64), np.full(nLoci, 0, dtype = np.int64))
def isFounder(self):
return (self.sire is None) and (self.dam is None)
def getGenotypedFounderStatus(self):
# options: 1: "GenotypedFounder", 0:"ChildOfNonGenotyped", 2:"ChildOfGenotyped"
if self.genotypedFounderStatus is None:
if self.isFounder() :
if self.genotypes is None or np.all(self.genotypes == 9):
self.genotypedFounderStatus = 0
else:
self.genotypedFounderStatus = 1
else:
                parentStatus = max(self.sire.getGenotypedFounderStatus(), self.dam.getGenotypedFounderStatus())
if parentStatus > 0:
self.genotypedFounderStatus = 2
else:
if self.genotypes is None or np.all(self.genotypes == 9):
self.genotypedFounderStatus = 0
else:
self.genotypedFounderStatus = 1
return self.genotypedFounderStatus
def isGenotypedFounder(self):
return (self.getGenotypedFounderStatus() == 1)
class PlantImputeIndividual(Individual):
"""Simple derived class for AlphaPlantImpute2
with some extra member variables"""
def __init__(self, idx, idn):
super().__init__(idx, idn)
self.founders = []
self.descendants = []
# Not sure of the code source: https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
# Slightly modified.
import re
def sorted_nicely( l , key):
""" Sort the given iterable in the way that humans expect."""
return sorted(l, key = lambda k: alphanum_key(key(k)))
def alphanum_key(k):
convert = lambda text: int(text) if text.isdigit() else text
return [ convert(c) for c in re.split('([0-9]+)', str(k)) ]
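# Minimal illustrative sketch (hypothetical ids) of the natural sort used for
# output ordering: numeric runs inside an id are compared as numbers, so
# "id10" sorts after "id2" rather than between "id1" and "id2".
def _example_natural_sort():
    ids = ["id10", "id2", "id1"]
    assert sorted_nicely(ids, key=lambda value: value) == ["id1", "id2", "id10"]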
class Generation(object):
def __init__(self, number):
self.number = number
self.families = []
self.individuals = []
self.sires = set()
self.dams = set()
self.parents = set()
def add_individual(self, ind):
self.individuals.append(ind)
def add_family(self, fam):
self.families.append(fam)
self.sires.add(fam.sire)
self.dams.add(fam.dam)
self.parents.add(fam.sire)
self.parents.add(fam.dam)
        # Note: Individuals are added separately.
class Pedigree(object):
def __init__(self, fileName = None, constructor = Individual):
self.maxIdn = 0
self.maxFam = 0
self.individuals = dict()
self.families = None
self.constructor = constructor
self.nGenerations = 0
self.generations = None #List of lists
self.truePed = None
self.nLoci = 0
self.startsnp = 0
self.endsnp = self.nLoci
self.referencePanel = [] #This should be an array of haplotypes. Or a dictionary?
self.maf=None #Maf is the frequency of 2s.
# Threshold that determines if an individual is high-density genotyped
self.high_density_threshold = 0.9
if fileName is not None:
self.readInPedigree(fileName)
self.args = None
self.writeOrderList = None
self.allele_coding = None
def reset_families(self):
for ind in self.individuals.values():
ind.families = []
ind.generation = None
self.nGenerations = 0
self.generations = None
self.families = None
for ind in self:
ind.families = []
self.setupFamilies()
def subset(self, start, stop):
new_pedigree = Pedigree(constructor = self.constructor)
new_pedigree.nLoci = stop - start
# Add all of the individuals.
for ind in self:
# Note: ind.subset strips away all of the family information.
new_pedigree[ind.idx] = ind.subset(start, stop)
for ind in self:
if ind.sire is not None:
new_ind = new_pedigree[ind.idx]
new_sire = new_pedigree[ind.sire.idx]
# Add individuals
new_ind.sire = new_sire
new_sire.offspring.append(new_ind)
if ind.dam is not None:
new_ind = new_pedigree[ind.idx]
new_dam = new_pedigree[ind.dam.idx]
# Add individuals
new_ind.dam = new_dam
new_dam.offspring.append(new_ind)
return new_pedigree
def merge(self, new_pedigree, start, stop):
        # This merges genotype, haplotype, and dosage information (if available).
for ind in self:
new_ind = new_pedigree[ind.idx]
if new_ind.genotypes is not None:
if ind.genotypes is None:
ind.genotypes = np.full(self.nLoci, 9, dtype = np.int8)
ind.genotypes[start:stop] = new_ind.genotypes
if new_ind.dosages is not None:
if ind.dosages is None:
ind.dosages = np.full(self.nLoci, -1, dtype = np.float32)
ind.dosages[start:stop] = new_ind.dosages
if new_ind.haplotypes is not None:
if ind.haplotypes is None:
ind.haplotypes = (np.full(self.nLoci, 9, dtype = np.int8), np.full(self.nLoci, 9, dtype = np.int8))
ind.haplotypes[0][start:stop] = new_ind.haplotypes[0]
ind.haplotypes[1][start:stop] = new_ind.haplotypes[1]
def __len__(self):
return len(self.individuals)
def writeOrder(self):
if self.writeOrderList is None:
inds = [ind for ind in self if (not ind.dummy) and (self.args.writekey in ind.fileIndex)]
self.writeOrderList = sorted_nicely(inds, key = lambda ind: ind.fileIndex[self.args.writekey])
if not self.args.onlykeyed:
indsNoDummyNoFileIndex = [ind for ind in self if (not ind.dummy) and (not self.args.writekey in ind.fileIndex)]
self.writeOrderList.extend(sorted_nicely(indsNoDummyNoFileIndex, key = lambda ind: ind.idx))
dummys = [ind for ind in self if ind.dummy]
self.writeOrderList.extend(sorted_nicely(dummys, key = lambda ind: ind.idx))
for ind in self.writeOrderList :
yield (ind.idx, ind)
def setMaf(self):
"""Calculate minor allele frequencies at each locus"""
# The default values of 1 (maf) and 2 (counts) provide a sensible prior
        # For example, at a locus where all individuals are missing, the MAF will be 0.5
maf = np.full(self.nLoci, 1, dtype = np.float32)
counts = np.full(self.nLoci, 2, dtype = np.float32)
for ind in self.individuals.values():
if ind.genotypes is not None:
addIfNotMissing(maf, counts, ind.genotypes)
self.maf = maf/counts
def getMissingness(self):
missingness = np.full(self.nLoci, 1, dtype = np.float32)
counts = 0
for ind in self.individuals.values():
if ind.genotypes is not None:
counts += 1
addIfMissing(missingness, ind.genotypes)
return missingness/counts
def set_high_density(self):
"""Set whether each individual is high-density"""
for individual in self:#.individuals.values():
is_high_density = np.mean(individual.genotypes != 9) >= self.high_density_threshold
if is_high_density:
individual.is_high_density = True
def fillIn(self, genotypes = True, haps = False, reads = False):
for individual in self:
individual.constructInfo(self.nLoci, genotypes = True, haps = haps, reads = reads)
def __getitem__(self, key) :
return self.individuals[key]
def __setitem__(self, key, value):
self.individuals[key] = value
def __iter__(self) :
if self.generations is None:
self.setUpGenerations()
for gen in self.generations:
for ind in gen.individuals:
yield ind
def __reversed__(self) :
if self.generations is None:
self.setUpGenerations()
for gen in reversed(self.generations):
for ind in gen.individuals:
yield ind
def sort_individuals(self, individuals):
return {k:v for k, v in sorted(individuals.items(), key = lambda pair: alphanum_key(pair[0]))}
# Generation code
def setUpGenerations(self) :
# Try and make pedigree independent.
self.individuals = self.sort_individuals(self.individuals)
self.nGenerations = 0
        #We can't use a simple iterator over self here, because __iter__ calls this function.
for idx, ind in self.individuals.items():
gen = ind.getGeneration()
self.nGenerations = max(gen, self.nGenerations)
self.nGenerations += 1 #To account for generation 0.
self.generations = [Generation(i) for i in range(self.nGenerations)]
for idx, ind in self.individuals.items():
gen = ind.getGeneration()
self.generations[gen].add_individual(ind)
#This is really sloppy, but probably not important.
def setupFamilies(self) :
if self.generations is None:
self.setUpGenerations()
self.families = dict()
for ind in self:
if not ind.isFounder():
parents = (ind.sire.idx, ind.dam.idx)
if parents in self.families :
self.families[parents].addChild(ind)
else:
self.families[parents] = Family(self.maxFam, ind.sire, ind.dam, [ind])
self.maxFam += 1
for family in self.families.values():
self.generations[family.generation].add_family(family)
def getFamilies(self, rev = False) :
if self.generations is None:
self.setUpGenerations()
if self.families is None:
self.setupFamilies()
gens = range(0, len(self.generations))
if rev: gens = reversed(gens)
for i in gens:
for family in self.generations[i].families:
yield family
def getIndividual(self, idx) :
if idx not in self.individuals:
self.individuals[idx] = self.constructor(idx, self.maxIdn)
self.maxIdn += 1
self.generations = None
return self.individuals[idx]
def readInPedigree(self, fileName):
with open(fileName) as f:
lines = f.readlines()
pedList = [line.split() for line in lines]
self.readInPedigreeFromList(pedList)
def readInPlantInfo(self, fileName):
with open(fileName) as f:
lines = f.readlines()
for line in lines:
parts = line.split()
idx = parts[0];
if idx not in self.individuals:
self.individuals[idx] = self.constructor(idx, self.maxIdn)
self.maxIdn += 1
ind = self.individuals[idx]
if len(parts) > 1:
if parts[1] == "DH" or parts[1] == "INBRED":
ind.inbred = True
elif parts[1][0] == "S" :
ind.inbred = False
ind.selfingGeneration = int(parts[1][1:])
else:
ind.inbred = False
if len(parts) > 2:
if "|" in line:
first, second = line.split("|")
self.addAncestors(ind, first.split()[2:])
self.addAncestors(ind, second.split())
else:
self.addAncestors(ind, parts[2:])
def addAncestors(self, ind, parts):
ancestors = []
for idx in parts:
if idx not in self.individuals:
self.individuals[idx] = self.constructor(idx, self.maxIdn)
self.maxIdn += 1
ancestor = self.individuals[idx]
ancestors.append(ancestor)
ind.imputationAncestors.append(ancestors)
def readInPedigreeFromList(self, pedList):
index = 0
for parts in pedList :
idx = parts[0]
self.individuals[idx] = self.constructor(idx, self.maxIdn)
self.maxIdn += 1
self.individuals[idx].fileIndex['pedigree'] = index; index += 1
for parts in pedList :
idx = parts[0]
if parts[1] == "0": parts[1] = None
if parts[2] == "0": parts[2] = None
if parts[1] is not None and parts[2] is None:
parts[2] = "MotherOf"+parts[0]
if parts[2] is not None and parts[1] is None:
parts[1] = "FatherOf"+parts[0]
ind = self.individuals[parts[0]]
if parts[1] is not None:
if parts[1] not in self.individuals:
self.individuals[parts[1]] = self.constructor(parts[1], self.maxIdn)
self.maxIdn += 1
self.individuals[parts[1]].fileIndex['pedigree'] = index; index += 1
self.individuals[parts[1]].dummy=True
sire = self.individuals[parts[1]]
ind.sire = sire
sire.offspring.append(ind)
sire.sex = 0
if parts[2] is not None:
if parts[2] not in self.individuals:
self.individuals[parts[2]] = self.constructor(parts[2], self.maxIdn)
self.maxIdn += 1
self.individuals[parts[2]].fileIndex['pedigree'] = index; index += 1
                    self.individuals[parts[2]].dummy=True
dam = self.individuals[parts[2]]
ind.dam = dam
dam.offspring.append(ind)
dam.sex = 1
# Optional fourth column contains sex OR inbred/outbred status
if len(parts) > 3:
male, female = {'m', '0', 'xy'}, {'f', '1', 'xx'}
inbred, outbred = {'dh', 'inbred'}, {'outbred'}
expected_entries = male | female | inbred | outbred
if parts[3].lower() not in expected_entries:
print(f"ERROR: unexpected entry in pedigree file, fourth field: '{parts[3]}'\nExiting...")
sys.exit(2)
# Sex
if parts[3].lower() in male:
ind.sex = 0
elif parts[3].lower() in female:
ind.sex = 1
# Inbred/DH
if parts[3].lower() in inbred:
ind.inbred = True
elif parts[3].lower() in outbred:
ind.inbred = False
def readInFromPlink(self, idList, pedList, bed, externalPedigree = False):
index = 0
if not externalPedigree:
self.readInPedigreeFromList(pedList)
for i, idx in enumerate(idList):
genotypes=bed[:, i].copy() ##I think this is the right order. Doing the copy to be safe.
nLoci = len(genotypes)
if self.nLoci == 0:
self.nLoci = nLoci
if self.nLoci != nLoci:
print(f"ERROR: incorrect number of loci when reading in plink file. Expected {self.nLoci} got {nLoci}.\nExiting...")
sys.exit(2)
if idx not in self.individuals:
self.individuals[idx] = self.constructor(idx, self.maxIdn)
self.maxIdn += 1
ind = self.individuals[idx]
ind.constructInfo(nLoci, genotypes=True)
ind.genotypes = genotypes
ind.fileIndex['plink'] = index; index += 1
if np.mean(genotypes == 9) < .1 :
ind.initHD = True
def check_line(self, id_data, fileName, idxExpected=None, ncol=None, getInd=True, even_cols=False):
idx, data = id_data
if idxExpected is not None and idx != idxExpected:
print(f"ERROR: Expected individual {idxExpected} but got individual {idx}.\nExiting...")
sys.exit(2)
if ncol is None:
ncol = len(data)
if ncol != len(data):
print(f"ERROR: incorrect number of columns in {fileName}. Expected {ncol} values but got {len(data)} for individual {idx}.\nExiting...")
sys.exit(2)
if even_cols and ncol % 2 != 0:
print(f"ERROR: file {fileName} doesn't contain an even number of allele columns for individual {idx}.\nExiting...")
sys.exit(2)
nLoci = len(data)
if self.nLoci == 0:
self.nLoci = nLoci
if self.nLoci != nLoci:
print(f"ERROR: inconsistent number of markers or alleles in {fileName}. Expected {self.nLoci} got {nLoci}.")
sys.exit(2)
ind = None
if getInd :
ind = self.getIndividual(idx)
return ind, data, ncol
def update_allele_coding(self, alleles):
"""Update allele codings with new alleles
self.allele_coding - array of shape (2, n_loci) such that:
self.allele_coding[0] - array of alleles that are coded as 0
(these are set to the first alleles 'seen')
self.allele_coding[1] - array of alleles that are coded as 1
(these are alleles that are different from
self.allele_coding[0])
alleles - alleles as read in from PLINK file
array of dtype np.bytes_, b'0' is 'missing'
This function is much like finding unique entries in a list:
only add a new item if it is different from those seen before
In this case, only record the first two uniques found,
but also check there are only 2 alleles in total"""
# If allele coding is complete then skip the update
if self.allele_coding_complete():
return
# Update any missing entries in self.allele_coding[0]
mask = self.allele_coding[0] == b'0'
self.allele_coding[0, mask] = alleles[mask]
# Update entries in self.allele_coding[1] if:
# - the alleles have not already been seen in self.allele_coding[0]
# - and the entry (in self.allele_coding[1]) is missing
mask = self.allele_coding[1] == b'0'
mask &= self.allele_coding[0] != alleles
self.allele_coding[1, mask] = alleles[mask]
# Check for > 2 alleles at any loci. These are:
# - alleles that are not missing
# - alleles that are not in either of self.allele_coding[0] or self.allele_coding[1]
mask = alleles != b'0'
        mask &= ((alleles != self.allele_coding[0]) & (alleles != self.allele_coding[1]))  # possible speedup: alleles != self.allele_coding[0] is already computed above
if np.sum(mask) > 0:
print(f'ERROR: more than two alleles found in input file(s) at loci {np.flatnonzero(mask)}\nExiting...')
sys.exit(2)
def allele_coding_complete(self):
"""Check whether the allele coding is complete (contains no missing values)"""
if self.allele_coding is None:
return False
else:
return np.sum(self.allele_coding == b'0') == 0
def decode_alleles(self, alleles):
"""Decode PLINK plain text alleles to AlphaGenes genotypes or haplotypes
handles single individuals - alleles has shape (n_loci*2, )
or multiple individuals - alleles has shape (n_individuals, n_loci*2)"""
# 'Double' self.allele_coding as there are two allele columns at each locus
coding = np.repeat(self.allele_coding, 2, axis=1)
decoded = np.full_like(alleles, b'0', dtype=np.int8)
decoded[alleles == coding[0]] = 0 # set alleles coded as 0
decoded[alleles == coding[1]] = 1 # set alleles coded as 1
decoded[alleles == b'0'] = 9 # convert missing (b'0' -> 9)
# Extract haplotypes
decoded = np.atleast_2d(decoded)
n_haps = decoded.shape[0] * 2
n_loci = decoded.shape[1] // 2
haplotypes = np.full((n_haps, n_loci), 9, dtype=np.int8)
haplotypes[::2] = decoded[:, ::2]
haplotypes[1::2] = decoded[:, 1::2]
# Convert to genotypes
genotypes = decoded[:, ::2] + decoded[:, 1::2]
genotypes[genotypes > 9] = 9 # reset missing values
return genotypes.squeeze(), haplotypes
def encode_alleles(self, haplotypes):
"""Encode haplotypes as PLINK plain text
handles any even number of haplotypes with shape (n_individuals*2, n_loci)"""
assert len(haplotypes) % 2 == 0
assert len(haplotypes[0])== self.nLoci
# 'Double' self.allele_coding as there are two allele columns at each locus in PLINK format
coding = np.repeat(self.allele_coding, 2, axis=1)
# Encoded array is 'reshaped' - one individual per line, each locus is a pair of alleles
encoded = np.full((len(haplotypes)//2, self.nLoci*2), b'0', dtype=np.bytes_)
# 'Splice' haplotypes into (adjacent) pairs of alleles
encoded[:, ::2] = haplotypes[::2]
encoded[:, 1::2] = haplotypes[1::2]
# Encode
mask0 = encoded == b'0' # major alleles (0)
mask1 = encoded == b'1' # minor alleles (1)
mask9 = encoded == b'9' # missing (9)
encoded[mask0] = np.broadcast_to(coding[0], encoded.shape)[mask0]
encoded[mask1] = np.broadcast_to(coding[1], encoded.shape)[mask1]
encoded[mask9] = b'0'
return encoded.squeeze()
def check_allele_coding(self, filename):
"""Check coding is sensible"""
# Monoallelic loci:
# allele_coding[0] filled, but allele_coding[1] unfilled, i.e. coding[1] == b'0'
n_monoallelic = (self.allele_coding[1] == b'0').sum()
# Unusual letters
unusual = ~np.isin(self.allele_coding, [b'A', b'C', b'G', b'T', b'0']) # unexpected letters
if np.sum(unusual) > 0:
letters = ' '.join(np.unique(self.allele_coding[unusual].astype(str)))
print(f'ERROR: unexpected values found in {filename}: [{letters}].\n'
f'Please check the file is in PLINK .ped format\nExiting...')
sys.exit(2)
elif n_monoallelic > 0:
print(f'WARNING: allele coding from {filename} has {n_monoallelic} monoallelic loci')
else:
# Reassuring message if tests pass
print('Allele coding OK')
def readInPed(self, filename, startsnp=None, stopsnp=None, haps=False, update_coding=False):
"""Read in genotypes, and optionally haplotypes, from a PLINK plain text formated file, usually .ped
If update_coding is True, the allele coding is interpreted from the .ped file and any coding
in self.allele_coding is updated (if the coding is incomplete).
Note: to force a full read of the allele coding set self.allele_codine = None first"""
print(f'Reading in PLINK .ped format: {filename}')
# Check the allele coding is to be got from file or is provided via self.allele_coding
if not update_coding and self.allele_coding is None:
raise ValueError('readInPed () called with no allele coding')
data_list = MultiThreadIO.readLinesPlinkPlainTxt(filename, startsnp=startsnp, stopsnp=stopsnp, dtype=np.bytes_)
index = 0
ncol = None
if self.nLoci != 0:
            # Temporarily double nLoci while reading in PLINK plain text formats (two fields per locus)
# otherwise reading of multiple PLINK files results in an 'Incorrect number of values'
# error in check_line()
self.nLoci = self.nLoci * 2
if not self.allele_coding_complete():
if self.allele_coding is None:
print(f'Interpreting allele coding from {filename}')
else:
print(f'Updating allele coding with coding from {filename}')
for value in data_list:
ind, alleles, ncol = self.check_line(value, filename, idxExpected=None, ncol=ncol, even_cols=True)
ind.constructInfo(self.nLoci, genotypes=True)
ind.fileIndex['plink'] = index; index += 1
if update_coding:
# Initialise allele coding array if none exists
# read_or_create = 'Reading' if self.allele_coding is None else 'Updating'
if self.allele_coding is None:
self.allele_coding = np.full((2, self.nLoci//2), b'0', dtype=np.bytes_)
# Update allele codes
# print(f'{read_or_create} allele coding with coding from {filename}')
self.update_allele_coding(alleles[::2]) # first allele in each pair
self.update_allele_coding(alleles[1::2]) # second allele
# Decode haplotypes and genotypes
ind.genotypes, haplotypes = self.decode_alleles(alleles)
if haps:
ind.haplotypes = haplotypes
if np.mean(ind.genotypes == 9) < .1:
ind.initHD = True
# Check allele coding
self.check_allele_coding(filename)
# Reset nLoci back to its undoubled state
self.nLoci = self.nLoci//2
def readInGenotypes(self, fileName, startsnp=None, stopsnp = None):
print("Reading in AlphaGenes format:", fileName)
index = 0
ncol = None
data_list = MultiThreadIO.readLines(fileName, startsnp = startsnp, stopsnp = stopsnp, dtype = np.int8)
for value in data_list:
ind, genotypes, ncol = self.check_line(value, fileName, idxExpected = None, ncol = ncol)
ind.constructInfo(self.nLoci, genotypes=True)
ind.genotypes = genotypes
ind.fileIndex['genotypes'] = index; index += 1
if np.mean(genotypes == 9) < .1 :
ind.initHD = True
def readInReferencePanel(self, fileName, startsnp=None, stopsnp = None):
print("Reading in reference panel:", fileName)
index = 0
ncol = None
data_list = MultiThreadIO.readLines(fileName, startsnp = startsnp, stopsnp = stopsnp, dtype = np.int8)
for value in data_list:
ind, haplotype, ncol = self.check_line(value, fileName, idxExpected = None, ncol = ncol, getInd=False)
self.referencePanel.append(haplotype)
def readInPhase(self, fileName, startsnp=None, stopsnp = None):
index = 0
ncol = None
data_list = MultiThreadIO.readLines(fileName, startsnp = startsnp, stopsnp = stopsnp, dtype = np.int8)
e = 0
currentInd = None
for value in data_list:
if e == 0:
idxExpected = None
else:
idxExpected = currentInd.idx
ind, haplotype, ncol = self.check_line(value, fileName, idxExpected = idxExpected, ncol = ncol)
currentInd = ind
ind.constructInfo(self.nLoci, haps=True)
ind.haplotypes[e][:] = haplotype
e = 1-e
ind.fileIndex['phase'] = index; index += 1
def readInSequence(self, fileName, startsnp=None, stopsnp = None):
index = 0
ncol = None
print("Reading in sequence data :", fileName)
data_list = MultiThreadIO.readLines(fileName, startsnp = startsnp, stopsnp = stopsnp, dtype = np.int64)
e = 0
currentInd = None
for value in data_list:
if e == 0:
idxExpected = None
else:
idxExpected = currentInd.idx
ind, reads, ncol = self.check_line(value, fileName, idxExpected = idxExpected, ncol = ncol)
currentInd = ind
ind.constructInfo(self.nLoci, reads=True)
ind.fileIndex['sequence'] = index; index += 1
ind.reads[e][:] = reads
e = 1-e
def callGenotypes(self, threshold):
for idx, ind in self.writeOrder():
matrix = ProbMath.getGenotypeProbabilities_ind(ind, InputOutput.args)
matrixCollapsedHets = np.array([matrix[0,:], matrix[1,:] + matrix[2,:], matrix[3,:]], dtype=np.float32)
calledGenotypes = np.argmax(matrixCollapsedHets, axis = 0)
setMissing(calledGenotypes, matrixCollapsedHets, threshold)
if InputOutput.args.sexchrom and ind.sex == 0:
doubleIfNotMissing(calledGenotypes)
ind.genotypes = calledGenotypes
def writePedigree(self, outputFile):
with open(outputFile, 'w+') as f:
for ind in self:
sire = "0"
if ind.sire is not None:
sire = ind.sire.idx
dam = "0"
if ind.dam is not None:
dam = ind.dam.idx
f.write(ind.idx + ' ' + sire + ' ' + dam + '\n')
def writeGenotypes(self, outputFile):
data_list = []
for ind in self :
data_list.append( (ind.idx, ind.genotypes) )
MultiThreadIO.writeLines(outputFile, data_list, str)
def writePhase(self, outputFile):
data_list = []
for ind in self:
if ind.haplotypes.ndim == 2: # diploid
data_list.append((ind.idx, ind.haplotypes[0]))
data_list.append((ind.idx, ind.haplotypes[1]))
elif ind.haplotypes.ndim == 1: # haploid
data_list.append((ind.idx, ind.haplotypes))
data_list.append((ind.idx, ind.haplotypes))
MultiThreadIO.writeLines(outputFile, data_list, str)
def writeDosages(self, outputFile):
data_list = []
for ind in self :
if ind.dosages is not None:
data_list.append( (ind.idx, ind.dosages) )
else:
dosages = ind.genotypes.copy()
dosages[dosages == 9] = 1
data_list.append( (ind.idx, dosages) )
MultiThreadIO.writeLines(outputFile, data_list, "{:.4f}".format)
def writeGenotypes_prefil(self, outputFile):
# print("Output is using filled genotypes. Filling missing with a value of 1")
# fillValues = np.full(1, self.nLoci)
print("Output is using filled genotypes. Filling missing with rounded allele frequency")
self.setMaf()
fillValues = np.round(self.maf)
with open(outputFile, 'w+') as f:
for idx, ind in self.individuals.items():
fill(ind.genotypes, fillValues)
self.writeLine(f, ind.idx, ind.genotypes, str)
def writeGenotypesPed(self, outputFile):
"""Write genotypes in PLINK plain text format"""
data_list = []
for ind in self:
# Split genotypes into 'pseudo' haplotypes such that
# the first allele/haplotype of a heterozygous locus is always 0
missing = ind.genotypes == 9
h1 = ind.genotypes//2
h2 = ind.genotypes - h1
h1[missing] = 9
h2[missing] = 9
alleles = self.encode_alleles(np.vstack([h1, h2]))
data_list.append( (ind.idx, alleles) )
MultiThreadIO.writeLinesPlinkPlainTxt(outputFile, data_list)
def writePhasePed(self, outputFile):
"""Write phased data (i.e. haplotypes) in PLINK plain text .ped format"""
data_list = []
for ind in self:
if ind.haplotypes.ndim == 2: # diploid
alleles = self.encode_alleles(ind.haplotypes)
elif ind.haplotypes.ndim == 1: # haploid
diploid = np.vstack([ind.haplotypes, ind.haplotypes])
alleles = self.encode_alleles(diploid)
data_list.append( (ind.idx, alleles) )
MultiThreadIO.writeLinesPlinkPlainTxt(outputFile, data_list)
def writeLine(self, f, idx, data, func) :
f.write(idx + ' ' + ' '.join(map(func, data)) + '\n')
@jit(nopython=True)
def fill(genotypes, fillValue):
for i in range(len(genotypes)):
if genotypes[i] == 9:
genotypes[i] = fillValue[i]
@jit(nopython=True)
def addIfNotMissing(array1, counts, array2):
for i in range(len(array1)):
if array2[i] != 9:
array1[i] += array2[i]
counts[i] += 2
@jit(nopython=True)
def addIfMissing(array1, array2):
for i in range(len(array1)):
if array2[i] == 9:
array1[i] += 1
@jit(nopython=True)
def doubleIfNotMissing(calledGenotypes):
nLoci = len(calledGenotypes)
for i in range(nLoci):
if calledGenotypes[i] == 1:
calledGenotypes[i] = 2
@jit(nopython=True)
def setMissing(calledGenotypes, matrix, thresh) :
nLoci = len(calledGenotypes)
for i in range(nLoci):
if matrix[calledGenotypes[i],i] < thresh:
calledGenotypes[i] = 9
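# Minimal illustrative sketch (hypothetical alleles) of the PLINK .ped allele
# coding used by update_allele_coding/decode_alleles/encode_alleles above:
# each locus occupies two allele columns, the first allele seen is coded 0,
# the second distinct allele is coded 1, and b'0' means missing.
def _example_allele_coding():
    coding = np.array([[b'A', b'G'],   # alleles coded as 0 at loci 0 and 1
                       [b'C', b'T']])  # alleles coded as 1 at loci 0 and 1
    # One individual, two loci -> four allele columns: A/C at locus 0, T/T at locus 1.
    alleles = np.array([b'A', b'C', b'T', b'T'])
    doubled = np.repeat(coding, 2, axis=1)  # one coding column per allele column
    decoded = np.full(len(alleles), 9, dtype=np.int8)
    decoded[alleles == doubled[0]] = 0
    decoded[alleles == doubled[1]] = 1
    genotypes = decoded[::2] + decoded[1::2]  # copies of the allele coded as 1
    assert list(genotypes) == [1, 2]
    # writeGenotypesPed reverses this by splitting a genotype g into the pseudo
    # haplotype pair (g//2, g - g//2): 0 -> (0, 0), 1 -> (0, 1), 2 -> (1, 1).
    return genotypes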
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/Pedigree.py
|
Pedigree.py
|
import sys
import argparse
import re
import numpy as np
from numba import jit
import random
import warnings
alphaplinkpython_avail = False
try:
import alphaplinkpython
from alphaplinkpython import PlinkWriter
alphaplinkpython_avail = True
except ImportError:
alphaplinkpython_avail = False
##Global:
args = None
def getParser(program) :
parser = argparse.ArgumentParser(description='')
core_parser = parser.add_argument_group("Core arguments")
core_parser.add_argument('-out', required=True, type=str, help='The output file prefix.')
addInputFileParser(parser)
#Genotype files.
# output_options_parser = parser.add_argument_group("Output options")
    # output_options_parser.add_argument('-writekey', default="id", required=False, type=str, help='Determines the order in which individuals are ordered in the output file based on their order in the corresponding input file. Animals not in the input file are placed at the end of the file and sorted in alphanumeric order. These animals can be suppressed with the "-onlykeyed" option. Options: id, pedigree, genotypes, sequence, segregation. Default: id.')
    # output_options_parser.add_argument('-onlykeyed', action='store_true', required=False, help='Flag to suppress animals that are not present in the file used with -outputkey. Also suppresses "dummy" animals.')
if program == "Default":
pass
if program == "AlphaImpute" :
core_impute_parser = parser.add_argument_group("Impute options")
core_impute_parser.add_argument('-no_impute', action='store_true', required=False, help='Flag to read in the files but not perform imputation.')
core_impute_parser.add_argument('-no_phase', action='store_true', required=False, help='Flag to not do HD phasing initially.')
core_impute_parser.add_argument('-maxthreads',default=1, required=False, type=int, help='Number of threads to use. Default: 1.')
core_impute_parser.add_argument('-binaryoutput', action='store_true', required=False, help='Flag to write out the genotypes as a binary plink output.')
if program in ["AlphaPeel", "AlphaMGS", "AlphaCall"]:
probability_parser = parser.add_argument_group("Genotype probability arguments")
add_arguments_from_dictionary(probability_parser, get_probability_options(), None)
if program in ["longreads"]:
longread_parser = parser.add_argument_group("Long read arguments")
longread_parser.add_argument('-longreads', default=None, required=False, type=str, nargs="*", help='A read file.')
if program == "AlphaPlantImpute" :
core_plant_parser = parser.add_argument_group("Mandatory arguments")
core_plant_parser.add_argument('-plantinfo', default=None, required=False, type=str, nargs="*", help='A plant info file.')
if program == "AlphaMGS" :
        core_assign_parser = parser.add_argument_group("Core assignment arguments")
        core_assign_parser.add_argument('-potentialgrandsires', default=None, required=False, type=str, help='A list of potential maternal grandsires for each individual.')
        core_assign_parser.add_argument('-usemaf', action='store_true', required=False, help='A flag to use the minor allele frequency when constructing genotype estimates for the sire and maternal grandsire. Not recommended for small input pedigrees.')
if program == "AlphaCall":
call_parser = parser.add_argument_group("AlphaCall arguments")
        call_parser.add_argument('-threshold', default=None, required=False, type=float, help='Genotype calling threshold. Use 0.3 for best-guess genotypes.')
        call_parser.add_argument('-sexchrom', action='store_true', required=False, help='A flag to indicate that this is a sex chromosome. Sex needs to be given in the pedigree file. This is currently an experimental option.')
return parser
def addInputFileParser(parser):
genotype_parser = parser.add_argument_group("Input arguments")
add_arguments_from_dictionary(genotype_parser, get_input_options(), None)
output_options_parser = parser.add_argument_group("Output options")
add_arguments_from_dictionary(output_options_parser, get_output_options(), None)
def get_input_options():
parse_dictionary = dict()
parse_dictionary["bfile"] = lambda parser: parser.add_argument('-bfile', default=None, required=False, type=str, nargs="*", help='A file in plink (binary) format. Only stable on Linux).')
parse_dictionary["genotypes"] = lambda parser: parser.add_argument('-genotypes', default=None, required=False, type=str, nargs="*", help='A file in AlphaGenes format.')
parse_dictionary["reference"] = lambda parser: parser.add_argument('-reference', default=None, required=False, type=str, nargs="*", help='A haplotype reference panel in AlphaGenes format.')
parse_dictionary["seqfile"] = lambda parser: parser.add_argument('-seqfile', default=None, required=False, type=str, nargs="*", help='A sequence data file.')
parse_dictionary["pedigree"] = lambda parser: parser.add_argument('-pedigree',default=None, required=False, type=str, nargs="*", help='A pedigree file in AlphaGenes format.')
parse_dictionary["phasefile"] = lambda parser: parser.add_argument('-phasefile',default=None, required=False, type=str, nargs="*", help='A phase file in AlphaGenes format.')
parse_dictionary["startsnp"] = lambda parser: parser.add_argument('-startsnp',default=None, required=False, type=int, help="The first marker to consider. The first marker in the file is marker '1'. Default: 1.")
parse_dictionary["stopsnp"] = lambda parser: parser.add_argument('-stopsnp',default=None, required=False, type=int, help='The last marker to consider. Default: all markers considered.')
parse_dictionary["seed"] = lambda parser: parser.add_argument('-seed',default=None, required=False, type=int, help='A random seed to use for debugging.')
return parse_dictionary
def get_output_options():
parse_dictionary = dict()
parse_dictionary["writekey"] = lambda parser: parser.add_argument('-writekey', default="id", required=False, type=str, help='Determines the order in which individuals are ordered in the output file based on their order in the corresponding input file. Animals not in the input file are placed at the end of the file and sorted in alphanumeric order. These animals can be surpressed with the "-onlykeyed" option. Options: id, pedigree, genotypes, sequence, segregation. Defualt: id.')
parse_dictionary["onlykeyed"] = lambda parser: parser.add_argument('-onlykeyed', action='store_true', required=False, help='Flag to surpress the animals who are not present in the file used with -outputkey. Also surpresses "dummy" animals.')
parse_dictionary["iothreads"] = lambda parser: parser.add_argument('-iothreads', default=1, required=False, type=int, help='Number of threads to use for io. Default: 1.')
return parse_dictionary
def get_multithread_options():
parse_dictionary = dict()
parse_dictionary["iothreads"] = lambda parser: parser.add_argument('-iothreads', default=1, required=False, type=int, help='Number of threads to use for input and output. Default: 1.')
parse_dictionary["maxthreads"] = lambda parser: parser.add_argument('-maxthreads', default=1, required=False, type=int, help='Maximum number of threads to use for analysis. Default: 1.')
return parse_dictionary
def get_probability_options():
parse_dictionary = dict()
parse_dictionary["error"] = lambda parser: parser.add_argument('-error', default=0.01, required=False, type=float, help='Genotyping error rate. Default: 0.01.')
parse_dictionary["seqerror"] = lambda parser: parser.add_argument('-seqerror', default=0.001, required=False, type=float, help='Assumed sequencing error rate. Default: 0.001.')
parse_dictionary["recombination"] = lambda parser: parser.add_argument('-recomb', default=1, required=False, type=float, help='Recombination rate per chromosome. Default: 1.')
return parse_dictionary
def add_arguments_from_dictionary(parser, arg_dict, options = None):
if options is None:
for key, value in arg_dict.items():
value(parser)
else:
for option in options:
if option in arg_dict:
arg_dict[option](parser)
else:
print("Option not found:", option, arg_dict)
def parseArgs(program, parser = None, no_args = False):
global args
args = rawParseArgs(program, parser, no_args = no_args)
args.program = program
# We want start/stop snp to be in python format (i.e. 0 to n-1).
# Input values are between 1 to n.
try:
if args.startsnp is not None:
args.startsnp -= 1
args.stopsnp -= 1
##Add any necessary code to check args here.
except AttributeError as error:
pass
return args
def rawParseArgs(program, parser = None, no_args = False) :
if parser is None:
parser = getParser(program)
if no_args:
return parser.parse_args(["-out", "out"])
else:
args = sys.argv[1:]
if len(args) == 0 :
parser.print_help(sys.stderr)
sys.exit(1)
if len(args) == 1:
if args[0] in ["-h", "-help", "--help"]:
parser.print_help(sys.stderr)
sys.exit(1)
else:
with open(args[0]) as f:
args = []
for line in f:
if line[0] != "-": line = "-" + line
args.extend(re.split(r"[,|\s|\n]+",line))
for arg in args:
if len(arg) == 0:
args.remove(arg)
for i, arg in enumerate(args):
if len(arg) > 0 and arg[0] == "-":
args[i] = str.lower(arg)
return parser.parse_args(args)
parseArgs("Default", parser = None, no_args = True)
@jit(nopython=True)
def setNumbaSeeds(seed):
np.random.seed(seed)
random.seed(seed)
def readInPedigreeFromInputs(pedigree, args, genotypes=True, haps=False, reads=False, update_coding=False):
    # getattr is used in case the program does not have a seed option.
seed = getattr(args, "seed", None)
if seed is not None:
np.random.seed(args.seed)
random.seed(args.seed)
setNumbaSeeds(args.seed)
startsnp = getattr(args, "startsnp", None)
stopsnp = getattr(args, "stopsnp", None)
pedigree.args = args
pedigreeReadIn = False
pedigree_args = getattr(args, "pedigree", None)
if pedigree_args is not None:
pedigreeReadIn = True
for ped in args.pedigree:
pedigree.readInPedigree(ped)
    # This gets the attribute from args, but returns None if the attribute is not valid.
genotypes = getattr(args, "genotypes", None)
if genotypes is not None:
for geno in args.genotypes:
pedigree.readInGenotypes(geno, startsnp, stopsnp)
reference = getattr(args, "reference", None)
if reference is not None:
for ref in args.reference:
pedigree.readInReferencePanel(ref, startsnp, stopsnp)
seqfile = getattr(args, "seqfile", None)
if seqfile is not None:
for seq in args.seqfile:
pedigree.readInSequence(seq, startsnp, stopsnp)
phasefile = getattr(args, "phasefile", None)
if phasefile is not None:
if args.program == "AlphaPeel":
print("Use of an external phase file is not currently supported. Phase information will be translated to genotype probabilities. If absolutely necessary use a penetrance file instead.")
for phase in args.phasefile:
pedigree.readInPhase(phase, startsnp, stopsnp)
bfile = getattr(args, "bfile", None)
if bfile is not None:
global alphaplinkpython_avail
if alphaplinkpython_avail:
for plink in args.bfile:
if pedigreeReadIn == True:
print(f"Pedigree file read in from -pedigree option. Reading in binary plink file {plink}. Pedigree information in the .fam file will be ignored.")
readInGenotypesPlink(pedigree, plink, startsnp, stopsnp, pedigreeReadIn)
else:
warnings.warn("The module alphaplinkpython was not found. Plink files cannot be read in and will be ignored.")
# Note: need to read .bim before .ped as the .bim sets the allele coding to use for reading the .ped
bim = getattr(args, 'bim', None)
if bim is not None:
for file in args.bim:
pedigree.readInBim(file, startsnp, stopsnp)
ped = getattr(args, 'ped', None)
if ped is not None:
for file in args.ped:
pedigree.readInPed(file, startsnp, stopsnp, haps=False, update_coding=update_coding)
#It's important that these happen after all the datafiles are read in.
#Each read in can add individuals. This information needs to be calculated on the final pedigree.
pedigree.fillIn(genotypes, haps, reads)
pedigree.setUpGenerations()
pedigree.setupFamilies()
def readMapFile(mapFile, start = None, stop = None) :
ids = []
chrs = []
positions = []
with open(mapFile) as f:
for line in f:
parts = line.split();
try:
positions.append(float(parts[2]))
chrs.append(parts[1])
ids.append(parts[0])
except ValueError:
pass
if start is None:
start = 0
stop = len(ids)
return (ids[start:stop], chrs[start:stop], positions[start:stop])
# def getPositionsFromMap(mapFileName, nLoci) :
# if mapFileName is None:
# return np.arange(nLoci, dtype = np.float32)
# positions = []
# with open(mapFileName) as f:
# for line in f:
# parts = line.split();
# try:
# positions.append(float(parts[1]))
# except ValueError:
# pass
# if len(positions) != nLoci :
# raise ValueError(f"Number of loci not equal to map file length {nLoci}, {len(positions)}")
# return np.array(positions, dtype = np.float32)
def readInSeg(pedigree, fileName, start=None, stop = None):
print("Reading in seg file:", fileName)
if start is None: start = 0
if stop is None: stop = pedigree.nLoci
nLoci = stop - start + 1 #Contains stop.
print(pedigree.maxIdn)
seg = np.full((pedigree.maxIdn, 4, nLoci), .25, dtype = np.float32)
index = 0
fileNColumns = 0
indHit = np.full(pedigree.maxIdn, 0, dtype = np.int64)
with open(fileName) as f:
e = 0
currentInd = None
for line in f:
parts = line.split();
idx = parts[0];
if fileNColumns == 0:
fileNColumns = len(parts)
if fileNColumns != len(parts):
raise ValueError(f"The length of the line is not the expected length. Expected {fileNColumns} got {len(parts)} on individual {idx} and line {e}.")
segLine=np.array([float(val) for val in parts[(start+1):(stop+2)]], dtype = np.float32)
if len(segLine) != nLoci:
raise ValueError(f"The length of the line subsection is not the expected length. Expected {nLoci} got {len(segLine)} on individual {idx} and line {e}.")
if idx not in pedigree.individuals:
print(f"Individual {idx} not found in pedigree. Individual ignored.")
else:
ind = pedigree.individuals[idx]
if e == 0:
currentInd = ind.idx
if currentInd != ind.idx:
raise ValueError(f"Unexpected individual. Expecting individual {currentInd}, but got ind {ind.idx} on value {e}")
seg[ind.idn,e,:] = segLine
e = (e+1)%4
ind.fileIndex['segregation'] = index; index += 1
indHit[ind.idn] += 1
for ind in pedigree:
if indHit[ind.idn] != 4:
print(f"No segregation information found for individual {ind.idx}")
return seg
def writeIdnIndexedMatrix(pedigree, matrix, outputFile):
np.set_printoptions(suppress=True)
print("Writing to ", outputFile)
with open(outputFile, 'w+') as f:
for idx, ind in pedigree.writeOrder():
if len(matrix.shape) == 2 :
tmp = np.around(matrix[ind.idn, :], decimals = 4)
f.write(' '.join(map(str, tmp)))
# f.write('\n')
if len(matrix.shape) == 3 :
for i in range(matrix.shape[1]) :
f.write(idx + " ")
tmp2 = map("{:.4f}".format, matrix[ind.idn,i, :].tolist())
tmp3 = ' '.join(tmp2)
f.write(tmp3)
f.write('\n')
# f.write('\n')
def writeFamIndexedMatrix(pedigree, matrix, outputFile):
np.set_printoptions(suppress=True)
print("Writing to ", outputFile)
with open(outputFile, 'w+') as f:
for fam in pedigree.getFamilies():
if len(matrix.shape) == 2 :
tmp = np.around(matrix[fam.idn, :], decimals = 4)
f.write(' '.join(map(str, tmp)))
# f.write('\n')
if len(matrix.shape) == 3 :
for i in range(matrix.shape[1]) :
f.write(str(fam.idn) + " ")
tmp2 = map("{:.4f}".format, matrix[fam.idn,i, :].tolist())
tmp3 = ' '.join(tmp2)
f.write(tmp3)
f.write('\n')
# f.write('\n')
def writeOutGenotypesPlink(pedigree, fileName):
global alphaplinkpython_avail
if alphaplinkpython_avail:
import alphaplinkpython
from alphaplinkpython import PlinkWriter
ped = [getFamString(ind) for ind in pedigree]
nLoci = pedigree.nLoci
nInd = len(pedigree.individuals)
genotypes = np.full((nLoci, nInd), 0, dtype = np.int8)
for i, ind in enumerate(pedigree):
genotypes[:,i] = ind.genotypes
genotypeIds = ["snp" + str(i+1) for i in range(nLoci)]
genotypePos = [i + 1 for i in range(nLoci)]
if args.startsnp is not None:
genotypeIds = ["snp" + str(i + args.startsnp + 1) for i in range(nLoci)]
genotypePos = [i + args.startsnp + 1 for i in range(nLoci)]
PlinkWriter.writeFamFile(fileName + ".fam", ped)
# PlinkWriter.writeBimFile(genotypeIds, fileName + ".bim")
writeSimpleBim(genotypeIds, genotypePos, fileName + ".bim")
PlinkWriter.writeBedFile(genotypes, fileName + ".bed")
else:
warnings.warn("The module alphaplinkpython was not found. Plink files cannot be written out and will be ignored.")
def writeSimpleBim(genotypeIds, genotypePos, fileName) :
with open(fileName, "w") as file:
for i in range(len(genotypeIds)):
line = f"1 {genotypeIds[i]} 0 {genotypePos[i]} A B \n"
file.write(line)
def readInGenotypesPlink(pedigree, fileName, startsnp, endsnp, externalPedigree = False):
bim = PlinkWriter.readBimFile(fileName + '.bim')
fam = PlinkWriter.readFamFile(fileName + '.fam')
bed = PlinkWriter.readBedFile(fileName + '.bed', bim, fam)
if startsnp is not None:
bed = bed[startsnp:endsnp,:]
pedList = [[line.getId(), line.getSire(), line.getDam()] for line in fam]
idList = [line.getId() for line in fam]
pedigree.readInFromPlink(idList, pedList, bed, externalPedigree)
def getFamString(ind):
sireStr = 0
damStr = 0
if ind.sire is not None:
sireStr = ind.sire.idx
if ind.dam is not None:
damStr = ind.dam.idx
return [str(ind.idx), str(sireStr), str(damStr)]
# return [str(ind.idx).encode('utf-8'), str(sireStr).encode('utf-8'), str(damStr).encode('utf-8')]
# @profile
# def writeIdnIndexedMatrix2(pedigree, matrix, outputFile):
# np.set_printoptions(suppress=True)
# print("Writing to ", outputFile)
# with open(outputFile, 'w+') as f:
# for idx, ind in pedigree.individuals.items():
# if len(matrix.shape) == 2 :
# tmp = np.around(matrix[ind.idn, :], decimals = 4)
# f.write(' '.join(map(str, tmp)))
# f.write('\n')
# if len(matrix.shape) == 3 :
# for i in range(matrix.shape[1]) :
# tmp2 = tuple(map("{:.4f}".format, matrix[ind.idn,i, :]))
# tmp3 = ' '.join(tmp2)
# f.write(tmp3)
# f.write('\n')
def print_boilerplate(name, version = None, commit = None, date = None):
"""Print software name, version and contact"""
width = 42 # width of 'website' line
print('-' * width)
print(f'{name:^{width}}') # centre aligned
print('-' * width)
if version is not None:
print(f'Version: {version}')
if commit is not None:
print(f'Commit: {commit}')
if date is not None:
print(f'Date: {date}')
print('Email: [email protected]')
print('Website: http://alphagenes.roslin.ed.ac.uk')
print('-' * width)
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/InputOutput.py
|
InputOutput.py
|
from numba import jit
import numpy as np
from . import NumbaUtils
from . import ProbMath
class HaploidMarkovModel :
def __init__(self, n_loci, error, recombination_rate = None):
self.update_paramaters(n_loci, error, recombination_rate)
self.directional_smoothing = self.create_directional_smoothing()
self.apply_smoothing = self.create_apply_smoothing()
self.apply_viterbi = self.create_viterbi_algorithm()
self.apply_sampling = self.create_sampling_algorithm(NumbaUtils.multinomial_sample)
def update_paramaters(self, n_loci, error, recombination_rate = None):
self.n_loci = n_loci
if type(error) is float:
self.error = np.full(n_loci, error, dtype=np.float32)
else:
self.error = error
if recombination_rate is None:
recombination_rate = 1.0/n_loci
if type(recombination_rate) is float:
self.recombination_rate = np.full(n_loci, recombination_rate, dtype=np.float32)
else:
self.recombination_rate = recombination_rate
def get_mask(self, called_haplotypes):
return np.all(called_haplotypes != 9, axis = 0)
# def get_run_option(default_arg, alternative_arg):
# # Return the default arg as true if it is supplied, otherwise return the alternative arg.
# if default_arg is not None:
# if alternative_arg is not None:
# if default_arg and alternative_arg:
# print("Both arguments are true, returning default")
# if not default_arg and not alternative_arg:
# print("Both arguments are false, returning default")
# return default_arg
# else:
# if alternative_arg is None:
# return True
# else:
# return not alternative_arg
def run_HMM(self, point_estimates = None, algorithm = "marginalize", **kwargs):
# return_called_values = get_run_option(return_called_values, return_genotype_probabilities)
if point_estimates is None:
point_estimates = self.get_point_estimates(**kwargs)
if algorithm == "marginalize":
total_probs = self.apply_smoothing(point_estimates, self.recombination_rate)
genotype_probabilities = self.calculate_genotype_probabilities(total_probs, **kwargs)
elif algorithm == "viterbi":
total_probs = self.apply_viterbi(point_estimates, self.recombination_rate)
genotype_probabilities = self.calculate_genotype_probabilities(total_probs, **kwargs)
elif algorithm == "sample":
total_probs = self.apply_sampling(point_estimates, self.recombination_rate)
genotype_probabilities = self.calculate_genotype_probabilities(total_probs, **kwargs)
else:
print(f"Valid alrogithm option not given: {alrogithm}")
return genotype_probabilities
def call_genotype_probabilities(self, genotype_probabilities, threshold = 0.1):
return ProbMath.call_genotype_probs(genotype_probabilities, threshold)
def get_point_estimates(self, individual, haplotype_library, library_calling_threshold = 0.95, **kwargs) :
called_haplotypes = haplotype_library.get_called_haplotypes(threshold = library_calling_threshold)
mask = self.get_mask(called_haplotypes)
point_estimates = self.njit_get_point_estimates(individual.genotypes, called_haplotypes, self.error, mask)
return point_estimates
@staticmethod
@jit(nopython=True, nogil=True)
def njit_get_point_estimates(genotypes, haplotypes, error, mask):
nHap, nLoci = haplotypes.shape
point_estimates = np.full((nLoci, nHap), 1, dtype = np.float32)
for i in range(nLoci):
if genotypes[i] != 9 and mask[i]:
for j in range(nHap):
sourceGeno = haplotypes[j, i]
if 2*sourceGeno == genotypes[i]:
point_estimates[i, j] = 1-error[i]
else:
point_estimates[i, j] = error[i]
return point_estimates
@staticmethod
@jit(nopython=True, nogil=True)
def transmission(cummulative_probabilities, previous_point_probability, recombination_rate, output):
output[:] = cummulative_probabilities * previous_point_probability
normalize(output)
output[:] *= (1-recombination_rate)
output[:] += recombination_rate
def create_directional_smoothing(self) :
transmission = self.transmission
@jit(nopython=True, nogil=True)
def directional_smoothing(point_estimate, recombination_rate, forward = False, backward = False):
output = np.full(point_estimate.shape, 1, dtype = np.float32)
n_loci = point_estimate.shape[0]
if forward:
start = 1
stop = n_loci
step = 1
if backward:
start = n_loci - 2
stop = -1
step = -1
for i in range(start, stop, step):
transmission(output[i-step,:], point_estimate[i - step,:], recombination_rate[i], output[i,:])
return output
return directional_smoothing
def create_apply_smoothing(self):
directional_smoothing = self.directional_smoothing
@jit(nopython=True, nogil=True)
def apply_smoothing(point_estimate, recombination_rate):
"""Calculate normalized state probabilities at each loci using the forward-backward algorithm"""
est = ( point_estimate *
directional_smoothing(point_estimate, recombination_rate, forward = True) *
directional_smoothing(point_estimate, recombination_rate, backward = True) )
# Return normalized probabilities
normalize_along_first_axis(est)
return est
return apply_smoothing
def create_sampling_algorithm(self, selection_function):
directional_smoothing = self.directional_smoothing
transmission = self.transmission
@jit(nopython=True, nogil=True)
def sample_path(point_estimate, recombination_rate):
"""Calculate normalized state probabilities at each loci using the forward-backward algorithm"""
# Right now using a matrix output; will improve later.
n_loci = point_estimate.shape[0]
output = np.full(point_estimate.shape, 0, dtype = np.float32)
forward_and_point_estimate = point_estimate * directional_smoothing(point_estimate, recombination_rate, forward = True)
# First index.
selected_index = selection_function(forward_and_point_estimate[-1].ravel())
output[- 1].ravel()[selected_index] = 1 # Set the output value at the selected_index to 1.
# Always sample backward (for tradition mostly).
locus_estimate = np.full(point_estimate[0].shape, 0, dtype = np.float32)
matrix_ones = np.full(point_estimate[0].shape, 1, dtype = np.float32)
start = n_loci - 2
stop = -1
step = -1
for i in range(start, stop, step):
# Pass along sampled value at the locus.
transmission(output[i-step,:], matrix_ones, recombination_rate[i], locus_estimate)
# Combine forward_estimate with backward_estimate
locus_estimate *= forward_and_point_estimate[i,:]
selected_index = selection_function(locus_estimate.ravel())
output[i].ravel()[selected_index] = 1 # Set the output value at the selected_index to 1.
# Return probabilities
return output
return sample_path
def create_viterbi_algorithm(self):
maximum_likelihood_step = self.maximum_likelihood_step
@jit(nopython=True, nogil=True)
def viterbi_path(point_estimate, recombination_rate):
"""Calculate normalized state probabilities at each loci using the forward-backward algorithm"""
# Right now using a matrix output; will improve later.
n_loci = point_estimate.shape[0]
path_score = np.full(point_estimate.shape, 0, dtype = np.float32)
previous_index = np.full(point_estimate.shape, 0, dtype = np.int64)
output = np.full(point_estimate.shape, 0, dtype = np.float32)
path_score[0] = point_estimate[0]
start = 1; stop = n_loci; step = 1
for i in range(start, stop, step):
# Propagate the best path score from the previous locus.
maximum_likelihood_step(path_score[i-step], recombination_rate[i], point_estimate[i], path_score[i], previous_index[i])
# Traceback
start_index = np.argmax(path_score[-1])
output[n_loci-1].ravel()[start_index] = 1
index = start_index
start = n_loci-2; stop = -1; step = -1
for i in range(start, stop, step):
index = previous_index[i-step].ravel()[index]
output[i].ravel()[index] = 1
return output
return viterbi_path
@staticmethod
@jit(nopython=True, nogil=True)
def maximum_likelihood_step(previous_path_score, recombination_rate, point_estimate, output_path_score, output_index):
best_index = np.argmax(previous_path_score)
best_score = previous_path_score[best_index]
n_hap = previous_path_score.shape[0]
for i in range(n_hap):
no_rec_score = (1-recombination_rate)*previous_path_score[i]
rec_score = best_score*recombination_rate
if no_rec_score > rec_score:
# No recombination
output_path_score[i] = no_rec_score*point_estimate[i]
output_index[i] = i
else:
# Recombination
output_path_score[i] = rec_score/n_hap*point_estimate[i]
output_index[i] = best_index
output_path_score /= np.sum(output_path_score)
def calculate_genotype_probabilities(self, total_probs, haplotype_library, **kwargs):
haplotype_dosages = haplotype_library.get_haplotypes()
return self.njit_calculate_genotype_probabilities(total_probs, haplotype_dosages)
@staticmethod
@jit(nopython=True, nogil=True)
def njit_calculate_genotype_probabilities(total_probs, reference_haplotypes) :
n_hap, n_loci = reference_haplotypes.shape
geno_probs = np.full((2, n_loci), 0.0000001, dtype = np.float32) # Adding a very small value as a prior in case all of the values are missing.
for i in range(n_loci):
for j in range(n_hap):
hap_value = reference_haplotypes[j, i]
prob_value = total_probs[i,j]
if hap_value != 9:
# Add in a sum of total_probs values.
geno_probs[0, i] += prob_value * (1-hap_value)
geno_probs[1, i] += prob_value * hap_value
geno_probs = geno_probs/np.sum(geno_probs, axis = 0)
return geno_probs
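# Usage sketch (illustrative, commented out; not part of the library): running
# the haploid HMM against a haplotype reference panel. `my_individual` and
# `my_library` are hypothetical objects; the library is assumed to expose
# get_called_haplotypes() and get_haplotypes() as used by get_point_estimates()
# and calculate_genotype_probabilities() above.
#
# model = HaploidMarkovModel(n_loci=1000, error=0.01)
# geno_probs = model.run_HMM(algorithm="marginalize",
#                            individual=my_individual,
#                            haplotype_library=my_library)
# called = model.call_genotype_probabilities(geno_probs, threshold=0.1)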
class DiploidMarkovModel(HaploidMarkovModel) :
def __init__(self, n_loci, error, recombination_rate = None):
HaploidMarkovModel.__init__(self, n_loci, error, recombination_rate)
def extract_reference_panels(self, haplotype_library = None, paternal_haplotype_library = None, maternal_haplotype_library = None) :
if maternal_haplotype_library is not None and paternal_haplotype_library is not None:
seperate_reference_panels = True
return paternal_haplotype_library, maternal_haplotype_library, seperate_reference_panels
else:
seperate_reference_panels = False
return haplotype_library, haplotype_library, seperate_reference_panels
def get_point_estimates(self, individual, library_calling_threshold= 0.95, **kwargs):
paternal_haplotype_library, maternal_haplotype_library, seperate_reference_panels = self.extract_reference_panels(**kwargs)
paternal_called_haplotypes = paternal_haplotype_library.get_called_haplotypes(threshold = library_calling_threshold)
maternal_called_haplotypes = maternal_haplotype_library.get_called_haplotypes(threshold = library_calling_threshold)
mask = self.get_mask(paternal_called_haplotypes) & self.get_mask(maternal_called_haplotypes)
return self.njit_get_point_estimates(individual.genotypes, paternal_called_haplotypes, maternal_called_haplotypes, self.error, mask)
@staticmethod
@jit(nopython=True, nogil=True)
def njit_get_point_estimates(indGeno, paternalHaplotypes, maternalHaplotypes, error, mask):
nPat, nLoci = paternalHaplotypes.shape
nMat, nLoci = maternalHaplotypes.shape
point_estimates = np.full((nLoci, nPat, nMat), 1, dtype = np.float32)
for i in range(nLoci):
if indGeno[i] != 9 and mask[i]:
for j in range(nPat):
for k in range(nMat):
sourceGeno = paternalHaplotypes[j, i] + maternalHaplotypes[k, i]
if sourceGeno == indGeno[i]:
point_estimates[i, j, k] = 1-error[i]
else:
point_estimates[i, j, k] = error[i]
return point_estimates
def calculate_genotype_probabilities(self, total_probs, haplotype_library = None, paternal_haplotype_library= None, maternal_haplotype_library= None, **kwargs):
paternal_haplotype_library, maternal_haplotype_library, seperate_reference_panels = self.extract_reference_panels(haplotype_library, paternal_haplotype_library, maternal_haplotype_library)
return self.njit_calculate_genotype_probabilities(total_probs, paternal_haplotype_library.get_haplotypes(), maternal_haplotype_library.get_haplotypes(), seperate_reference_panels)
@staticmethod
@jit(nopython=True, nogil=True)
def njit_calculate_genotype_probabilities(total_probs, paternal_haplotypes, maternal_haplotypes, seperate_reference_panels) :
n_pat, n_loci = paternal_haplotypes.shape
n_mat, n_loci = maternal_haplotypes.shape
geno_probs = np.full((4, n_loci), 0.00001, dtype = np.float32)
for i in range(n_loci):
for j in range(n_pat):
for k in range(n_mat):
# diploid case where the markers are assumed independent.
if seperate_reference_panels or j != k:
pat_value = paternal_haplotypes[j, i]
mat_value = maternal_haplotypes[k, i]
prob_value = total_probs[i,j,k]
if pat_value != 9 and mat_value != 9:
# Add in a sum of total_probs values.
geno_probs[0, i] += prob_value * (1-pat_value)*(1-mat_value) #aa
geno_probs[1, i] += prob_value * (1-pat_value)*mat_value #aA
geno_probs[2, i] += prob_value * pat_value*(1-mat_value) #Aa
geno_probs[3, i] += prob_value * pat_value*mat_value #AA
# Haploid case where the markers are not independent
else:
hap_value = paternal_haplotypes[j, i]
prob_value = total_probs[i,j,k]
if hap_value != 9:
geno_probs[0, i] += prob_value * (1-hap_value)
geno_probs[1, i] += 0
geno_probs[2, i] += 0
geno_probs[3, i] += prob_value * hap_value
geno_probs = geno_probs/np.sum(geno_probs, axis = 0)
return geno_probs
@staticmethod
@jit(nopython=True, nogil=True)
def transmission(cummulative_probabilities, previous_point_probability, recombination_rate, output):
output[:] = cummulative_probabilities * previous_point_probability
normalize(output)
row_sums = np.sum(output, 0)
col_sums = np.sum(output, 1)
output[:] *= (1 - recombination_rate)**2 # No recombination on either chromosome.
output[:] += np.expand_dims(row_sums, 0)/output.shape[0]*recombination_rate*(1-recombination_rate) # recombination on the maternal (second) chromosome
output[:] += np.expand_dims(col_sums, 1)/output.shape[1]*recombination_rate*(1-recombination_rate) # recombination on the paternal (first) chromosome
output[:] += recombination_rate**2/output.size # double recombination
@staticmethod
@jit(nopython=True, nogil=True)
def maximum_likelihood_step(previous_path_score, recombination_rate, point_estimate, output_path_score, output_index):
n_pat = previous_path_score.shape[0]
n_mat = previous_path_score.shape[1]
combined_max_index = np.argmax(previous_path_score)
combined_max_score = previous_path_score.ravel()[combined_max_index] * recombination_rate**2/(n_mat*n_pat)
paternal_max_index = np.full(n_pat, 0, dtype = np.int64)
paternal_max_value = np.full(n_pat, 0, dtype = np.float32)
maternal_max_index = np.full(n_mat, 0, dtype = np.int64)
maternal_max_value = np.full(n_mat, 0, dtype = np.float32)
# Recombination on the maternal side, paternal side is fixed
for i in range(n_pat):
index = np.argmax(previous_path_score[i,:])
paternal_max_value[i] = previous_path_score[i, index] * (1-recombination_rate)*recombination_rate/n_mat
paternal_max_index[i] = i*n_mat + index
# Recombination on the paternal side, maternal side is fixed
for j in range(n_mat):
index = np.argmax(previous_path_score[:, j])
maternal_max_value[j] = previous_path_score[index, j] * (1-recombination_rate)*recombination_rate/n_pat
maternal_max_index[j] = index*n_mat + j
for i in range(n_pat):
for j in range(n_mat):
best_score = (1-recombination_rate)**2*previous_path_score[i,j]
best_index = i*n_mat + j
# Paternal recombination
if paternal_max_value[i] > best_score:
best_score = paternal_max_value[i]
best_index = paternal_max_index[i]
if maternal_max_value[j] > best_score:
best_score = maternal_max_value[j]
best_index = maternal_max_index[j]
if combined_max_score > best_score:
best_score = combined_max_score
best_index = combined_max_index
output_path_score[i,j] = best_score*point_estimate[i,j]
output_index[i,j] = best_index
output_path_score /= np.sum(output_path_score)
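# Usage sketch (illustrative, commented out): the diploid model can be run with a
# single shared panel (haplotype_library=...) or with separate paternal and
# maternal panels, which is what extract_reference_panels() above dispatches on.
# The library and individual objects below are hypothetical.
#
# model = DiploidMarkovModel(n_loci=1000, error=0.01)
# geno_probs = model.run_HMM(algorithm="marginalize",
#                            individual=my_individual,
#                            paternal_haplotype_library=pat_library,
#                            maternal_haplotype_library=mat_library)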
class JointMarkovModel(HaploidMarkovModel) :
def __init__(self, n_loci, error, recombination_rate = None):
HaploidMarkovModel.__init__(self, n_loci, error, recombination_rate)
@staticmethod
@jit(nopython=True, nogil=True)
def njit_get_point_estimates(indGeno, haplotypes, error, mask):
n_hap, n_loci = haplotypes.shape
point_estimates = np.full((n_loci, n_hap, n_hap + 1), 1, dtype = np.float32)
diploid_section = point_estimates[:,:,0:-1]
haploid_section = point_estimates[:,:,-1]
# Diploid point estimates/Emission probabilities
for i in range(n_loci):
if indGeno[i] != 9 and mask[i]:
for j in range(n_hap):
for k in range(n_hap):
sourceGeno = haplotypes[j, i] + haplotypes[k, i]
if sourceGeno == indGeno[i]:
diploid_section[i, j, k] = 1-error[i]
else:
diploid_section[i, j, k] = error[i]
# Haploid point estimates/Emission probabilities
for i in range(n_loci):
if indGeno[i] != 9 and mask[i]:
for j in range(n_hap):
sourceGeno = 2*haplotypes[j, i]
if sourceGeno == indGeno[i]:
haploid_section[i, j] = 1-error[i]
else:
haploid_section[i, j] = error[i]
return point_estimates
@staticmethod
@jit(nopython=True, nogil=True)
def njit_calculate_genotype_probabilities(total_probs, reference_haplotypes) :
n_hap, n_loci = reference_haplotypes.shape
geno_probs = np.full((4, n_loci), 0.00001, dtype = np.float32)
diploid_section = total_probs[:,:,0:-1]
haploid_section = total_probs[:,:,-1]
for i in range(n_loci):
for j in range(n_hap):
for k in range(n_hap):
# diploid case where the markers are assumed independent.
if j != k:
pat_value = reference_haplotypes[j, i]
mat_value = reference_haplotypes[k, i]
prob_value = diploid_section[i,j,k]
if pat_value != 9 and mat_value != 9:
# Add in a sum of total_probs values.
geno_probs[0, i] += prob_value * (1-pat_value)*(1-mat_value) #aa
geno_probs[1, i] += prob_value * (1-pat_value)*mat_value #aA
geno_probs[2, i] += prob_value * pat_value*(1-mat_value) #Aa
geno_probs[3, i] += prob_value * pat_value*mat_value #AA
# markers are not independent
else:
hap_value = reference_haplotypes[j, i]
prob_value = diploid_section[i,j,k]
if hap_value != 9:
geno_probs[0, i] += prob_value * (1-hap_value)
geno_probs[1, i] += 0
geno_probs[2, i] += 0
geno_probs[3, i] += prob_value * hap_value
for i in range(n_loci):
for j in range(n_hap):
hap_value = reference_haplotypes[j, i]
prob_value = haploid_section[i,j]
if hap_value != 9:
geno_probs[0, i] += prob_value * (1-hap_value)
geno_probs[1, i] += 0
geno_probs[2, i] += 0
geno_probs[3, i] += prob_value * hap_value
geno_probs = geno_probs/np.sum(geno_probs, axis = 0)
return geno_probs
@staticmethod
@jit(nopython=True, nogil=True)
def transmission(cummulative_probabilities, previous_point_probability, recombination_rate, output):
output[:] = cummulative_probabilities * previous_point_probability
normalize(output)
diploid_section = output[:,0:-1]
haploid_section = output[:,-1]
diploid_weight = np.sum(diploid_section)
haploid_weight = np.sum(haploid_section)
row_sums = np.sum(diploid_section, 0)
col_sums = np.sum(diploid_section, 1)
diploid_section[:] *= (1 - recombination_rate)**2
diploid_section[:] += np.expand_dims(row_sums, 0)/diploid_section.shape[0]*recombination_rate*(1-recombination_rate) # recombination on the maternal (second) chromosome
diploid_section[:] += np.expand_dims(col_sums, 1)/diploid_section.shape[1]*recombination_rate*(1-recombination_rate) # recombination on the paternal (first) chromosome
diploid_section[:] += diploid_weight*recombination_rate**2/diploid_section.size # double recombination
haploid_section[:] *= (1 - recombination_rate)
haploid_section[:] += haploid_weight*recombination_rate/haploid_section.size
# lose recombination mass from the diploid section and add in the recombination mass coming from the haploid section
diploid_section[:] *= (1 - recombination_rate)
diploid_section[:] += recombination_rate * haploid_weight/diploid_section.size
# lose recombination mass from the haploid section and add in the recombination mass coming from the diploid section
haploid_section[:] *= (1 - recombination_rate)
haploid_section[:] += recombination_rate * diploid_weight/haploid_section.size
# @staticmethod
# @jit(nopython=True, nogil=True)
# def maximum_likelihood_step(previous_path_score, recombination_rate, point_estimate, output_path_score, output_index):
# n_pat = previous_path_score.shape[0]
# n_mat = previous_path_score.shape[1]
# combined_max_index = np.argmax(previous_path_score)
# combined_max_score = previous_path_score.ravel()[combined_max_index] * recombination_rate**2/(n_mat*n_pat)
# paternal_max_index = np.full(n_pat, 0, dtype = np.int64)
# paternal_max_value = np.full(n_pat, 0, dtype = np.float32)
# maternal_max_index = np.full(n_mat, 0, dtype = np.int64)
# maternal_max_value = np.full(n_mat, 0, dtype = np.float32)
# # Recombination on the maternal side, paternal side is fixed
# for i in range(n_pat):
# index = np.argmax(previous_path_score[i,:])
# paternal_max_value[i] = previous_path_score[i, index] * (1-recombination_rate)*recombination_rate/n_mat
# paternal_max_index[i] = i*n_mat + index
# # Recombination on the paternal side, maternal side is fixed
# for j in range(n_mat):
# index = np.argmax(previous_path_score[:, j])
# maternal_max_value[j] = previous_path_score[index, j] * (1-recombination_rate)*recombination_rate/n_pat
# maternal_max_index[j] = index*n_mat + j
# for i in range(n_pat):
# for j in range(n_mat):
# best_score = (1-recombination_rate)**2*previous_path_score[i,j]
# best_index = i*n_mat + j
# # Paternal recombination
# if paternal_max_value[i] > best_score:
# best_score = paternal_max_value[i]
# best_index = paternal_max_index[i]
# if maternal_max_value[j] > best_score:
# best_score = maternal_max_value[j]
# best_index = maternal_max_index[j]
# if combined_max_score > best_score:
# best_score = combined_max_score
# best_index = combined_max_index
# output_path_score[i,j] = best_score*point_estimate[i,j]
# output_index[i,j] = best_index
# output_path_score /= np.sum(output_path_score)
@jit(nopython=True, nogil=True)
def normalize(mat):
mat[:] /= np.sum(mat)
@jit(nopython=True, nogil=True)
def normalize_along_first_axis(mat):
for i in range(mat.shape[0]):
normalize(mat[i,:])
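# Illustrative check (commented out): normalize() rescales a matrix so that it
# sums to one, and normalize_along_first_axis() applies that locus by locus,
# i.e. each slice mat[i, :] ends up summing to one.
#
# mat = np.random.rand(5, 10).astype(np.float32)
# normalize_along_first_axis(mat)
# assert np.allclose(mat.sum(axis=1), 1.0, atol=1e-5)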
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/CombinedHMM.py
|
CombinedHMM.py
|
from numba import jit
import numpy as np
import collections
def getGenotypesFromMaf(maf) :
nLoci = len(maf)
mafGenotypes = np.full((4, nLoci), .25, dtype = np.float32)
mafGenotypes[0,:] = (1-maf)**2
mafGenotypes[1,:] = maf*(1-maf)
mafGenotypes[2,:] = (1-maf)*maf
mafGenotypes[3,:] = maf**2
return mafGenotypes
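# Illustrative example (commented out): getGenotypesFromMaf returns
# Hardy-Weinberg style probabilities for the four ordered genotype states
# (aa, aA, Aa, AA). For a minor allele frequency of 0.2 this gives
# 0.64, 0.16, 0.16 and 0.04; for 0.5 it gives 0.25 for every state.
#
# maf = np.array([0.2, 0.5], dtype=np.float32)
# print(getGenotypesFromMaf(maf))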
def getGenotypeProbabilities_ind(ind, args = None, log = False):
if args is None:
error = 0.01
seqError = 0.001
sexChromFlag = False
else:
error = args.error
seqError = args.seqerror
sexChromFlag = getattr(args, "sexchrom", False) and ind.sex == 0 #This is the sex chromosome and the individual is male.
if ind.reads is not None:
nLoci = len(ind.reads[0])
if ind.genotypes is not None:
nLoci = len(ind.genotypes)
if not log:
return getGenotypeProbabilities(nLoci, ind.genotypes, ind.reads, error, seqError, sexChromFlag)
else:
return getGenotypeProbabilities_log(nLoci, ind.genotypes, ind.reads, error, seqError, sexChromFlag)
def getGenotypeProbabilities(nLoci, genotypes, reads, error = 0.01, seqError = 0.001, useSexChrom=False):
vals = np.full((4, nLoci), .25, dtype = np.float32)
if type(error) is float:
error = np.full(nLoci, error)
if type(seqError) is float:
seqError = np.full(nLoci, seqError)
errorMat = generateErrorMat(error)
if genotypes is not None:
setGenoProbsFromGenotypes(genotypes, errorMat, vals)
if reads is not None:
log1 = np.log(1-seqError)
log2 = np.log(.5)
loge = np.log(seqError)
valSeq = np.array([log1*reads[0] + loge*reads[1],
log2*reads[0] + log2*reads[1],
log2*reads[0] + log2*reads[1],
log1*reads[1] + loge*reads[0]])
maxVals = np.amax(valSeq, 0)
valSeq = valSeq - maxVals
valSeq = np.exp(valSeq)
vals *= valSeq
if useSexChrom:
#Recode so we only care about the two homozygous states, but they are coded as 0, 1.
vals[1,:] = vals[3,:]
vals[2,:] = 0
vals[3,:] = 0
return vals/np.sum(vals,0)
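# Usage sketch (illustrative, commented out): combining genotype calls and
# sequence reads into genotype probabilities. The toy data below assume reads
# are given as a pair of (reference, alternative) read-count arrays, as in
# getGenotypeProbabilities_ind() above; a missing genotype is coded as 9.
#
# genotypes = np.array([0, 1, 9], dtype=np.int8)
# reads = (np.array([5, 2, 0]), np.array([0, 3, 4]))
# probs = getGenotypeProbabilities(3, genotypes, reads, error=0.01, seqError=0.001)
# # probs has shape (4, 3) and each column sums to 1.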
def getGenotypeProbabilities_log(nLoci, genotypes, reads, error = 0.01, seqError = 0.001, useSexChrom=False):
vals = np.full((4, nLoci), .25, dtype = np.float32)
if type(error) is float:
error = np.full(nLoci, error)
if type(seqError) is float:
seqError = np.full(nLoci, seqError)
errorMat = generateErrorMat(error)
if genotypes is not None:
setGenoProbsFromGenotypes(genotypes, errorMat, vals)
vals = np.log(vals)
if reads is not None:
log1 = np.log(1-seqError)
log2 = np.log(.5)
loge = np.log(seqError)
ref_reads = reads[0]
alt_reads = reads[1]
val_seq = np.full((4, nLoci), 0, dtype = np.float32)
val_seq[0,:] = log1*ref_reads + loge*alt_reads
val_seq[1,:] = log2*ref_reads + log2*alt_reads
val_seq[2,:] = log2*ref_reads + log2*alt_reads
val_seq[3,:] = loge*ref_reads + log1*alt_reads
vals += val_seq
output = np.full((4, nLoci), 0, dtype = np.float32)
apply_log_norm_1d(vals, output)
return output
@jit(nopython=True, nogil = True)
def apply_log_norm_1d(vals, output):
nLoci = vals.shape[-1]
for i in range(nLoci):
output[:,i] = log_norm_1D(vals[:, i])
@jit(nopython=True, nogil = True)
def log_norm_1D(mat):
log_exp_sum = 0
first = True
maxVal = 100
for a in range(4):
if mat[a] > maxVal or first:
maxVal = mat[a]
if first:
first = False
for a in range(4):
log_exp_sum += np.exp(mat[a] - maxVal)
return mat - (np.log(log_exp_sum) + maxVal)
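# Illustrative check (commented out): log_norm_1D is a log-sum-exp
# normalisation over the four genotype states, so exponentiating its output
# gives probabilities that sum to one.
#
# log_vals = np.array([-1.0, -2.0, -3.0, -4.0], dtype=np.float32)
# normed = log_norm_1D(log_vals)
# assert abs(float(np.exp(normed).sum()) - 1.0) < 1e-5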
def set_from_genotype_probs(ind, geno_probs = None, calling_threshold = 0.1, set_genotypes = False, set_dosages = False, set_haplotypes = False) :
# Check diploid geno_probs; not sure what to do for haploid except assume inbred?
if geno_probs.shape[0] == 2:
geno_probs = geno_probs/np.sum(geno_probs, axis = 0)
called_values = call_genotype_probs(geno_probs, calling_threshold)
# Assuming the individual is haploid
if set_dosages:
if ind.dosages is None:
ind.dosages = called_values.dosages.copy()
ind.dosages[:] = 2*called_values.dosages
if set_genotypes:
ind.genotypes[:] = 2*called_values.haplotypes
ind.genotypes[called_values.haplotypes == 9] = 9 # Correctly set missing loci.
if set_haplotypes:
ind.haplotypes[0][:] = called_values.haplotypes
ind.haplotypes[1][:] = called_values.haplotypes
if geno_probs.shape[0] == 4:
geno_probs = geno_probs/np.sum(geno_probs, axis = 0)
called_values = call_genotype_probs(geno_probs, calling_threshold)
if set_dosages:
if ind.dosages is None:
ind.dosages = called_values.dosages.copy()
ind.dosages[:] = called_values.dosages
if set_genotypes:
ind.genotypes[:] = called_values.genotypes
if set_haplotypes:
ind.haplotypes[0][:] = called_values.haplotypes[0]
ind.haplotypes[1][:] = called_values.haplotypes[1]
def call_genotype_probs(geno_probs, calling_threshold = 0.1) :
if geno_probs.shape[0] == 2:
# Haploid
HaploidValues = collections.namedtuple("HaploidValues", ["haplotypes", "dosages"])
dosages = geno_probs[1,:].copy()
haplotypes = call_matrix(geno_probs, calling_threshold)
return HaploidValues(dosages = dosages, haplotypes = haplotypes)
if geno_probs.shape[0] == 4:
# Diploid
DiploidValues = collections.namedtuple("DiploidValues", ["genotypes", "haplotypes", "dosages"])
dosages = geno_probs[1,:] + geno_probs[2,:] + 2*geno_probs[3,:]
# Collapse the two heterozygous states into one.
collapsed_hets = np.array([geno_probs[0,:], geno_probs[1,:] + geno_probs[2,:], geno_probs[3,:]], dtype=np.float32)
genotypes = call_matrix(collapsed_hets, calling_threshold)
# aa + aA, Aa + AA
haplotype_0 = np.array([geno_probs[0,:] + geno_probs[1,:], geno_probs[2,:] + geno_probs[3,:]], dtype=np.float32)
haplotype_1 = np.array([geno_probs[0,:] + geno_probs[2,:], geno_probs[1,:] + geno_probs[3,:]], dtype=np.float32)
haplotypes = (call_matrix(haplotype_0, calling_threshold), call_matrix(haplotype_1, calling_threshold))
return DiploidValues(dosages = dosages, haplotypes = haplotypes, genotypes = genotypes)
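# Usage sketch (illustrative, commented out): calling genotypes, haplotypes and
# dosages from a diploid probability matrix. With a calling threshold of 0.9 the
# second locus below is too uncertain, so its genotype is set to missing (9).
#
# geno_probs = np.array([[0.95, 0.40],   # aa
#                        [0.02, 0.30],   # aA
#                        [0.02, 0.20],   # Aa
#                        [0.01, 0.10]],  # AA
#                       dtype=np.float32)
# called = call_genotype_probs(geno_probs, calling_threshold=0.9)
# # called.genotypes -> [0, 9]; called.dosages -> approximately [0.06, 0.70]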
def call_matrix(matrix, threshold):
called_genotypes = np.argmax(matrix, axis = 0)
setMissing(called_genotypes, matrix, threshold)
return called_genotypes.astype(np.int8)
@jit(nopython=True)
def setMissing(calledGenotypes, matrix, threshold) :
nLoci = len(calledGenotypes)
for i in range(nLoci):
if matrix[calledGenotypes[i],i] < threshold:
calledGenotypes[i] = 9
@jit(nopython=True)
def setGenoProbsFromGenotypes(genotypes, errorMat, vals):
nLoci = len(genotypes)
for i in range(nLoci) :
if genotypes[i] != 9:
vals[:, i] = errorMat[genotypes[i], :, i]
def generateErrorMat(error) :
errorMat = np.array([[1-error, error/2, error/2, error/2],
[error/2, 1-error, 1-error, error/2],
[error/2, error/2, error/2, 1-error]], dtype = np.float32)
errorMat = errorMat/np.sum(errorMat, 1)[:,None]
return errorMat
def generateSegregationXXChrom(partial=False, e= 1e-06) :
paternalTransmission = np.array([ [1, 1, 0, 0],[0, 0, 1, 1]])
maternalTransmission = np.array([ [1, 0, 1, 0],[0, 1, 0, 1]])
fatherAlleleCoding = np.array([0, 0, 1, 1])
motherAlleleCoding = np.array([0, 1, 0, 1])
# ! fm fm fm fm
# !segregationOrder: pp, pm, mp, mm
segregationTensor = np.zeros((4, 4, 4, 4))
for segregation in range(4):
#Change so that father always passes on the maternal allele?
if(segregation == 0) :
father = maternalTransmission
mother = paternalTransmission
if(segregation == 1) :
father = maternalTransmission
mother = maternalTransmission
if(segregation == 2) :
father = maternalTransmission
mother = paternalTransmission
if(segregation == 3) :
father = maternalTransmission
mother = maternalTransmission
# !alleles: aa, aA, Aa, AA
for allele in range(4) :
segregationTensor[:, :, allele, segregation] = np.outer(father[fatherAlleleCoding[allele]], mother[motherAlleleCoding[allele]])
segregationTensor = segregationTensor*(1-e) + e/4 #trace has 4 times as many elements as it should since it has 4 internal reps.
segregationTensor = segregationTensor.astype(np.float32)
return(segregationTensor)
def generateSegregationXYChrom(partial=False, e= 1e-06) :
paternalTransmission = np.array([ [1, 1, 0, 0],[0, 0, 1, 1]])
maternalTransmission = np.array([ [1, 0, 1, 0],[0, 1, 0, 1]])
motherAlleleCoding = np.array([0, 1, 0, 1])
# ! fm fm fm fm
# !segregationOrder: pp, pm, mp, mm
#They don't get anything from the father -- father is always 0
segregationTensor = np.zeros((4, 4, 4, 4))
for segregation in range(4):
if(segregation == 0) :
mother = paternalTransmission
if(segregation == 1) :
mother = maternalTransmission
if(segregation == 2) :
mother = paternalTransmission
if(segregation == 3) :
mother = maternalTransmission
# !alleles: aa, aA, Aa, AA
for allele in range(4) :
for fatherAllele in range(4):
segregationTensor[fatherAllele, :, allele, segregation] = mother[motherAlleleCoding[allele]]
segregationTensor = segregationTensor*(1-e) + e/4 #trace has 4 times as many elements as it should since it has 4 internal reps.
segregationTensor = segregationTensor.astype(np.float32)
return(segregationTensor)
def generateSegregation(partial=False, e= 1e-06) :
paternalTransmission = np.array([ [1, 1, 0, 0],[0, 0, 1, 1]])
maternalTransmission = np.array([ [1, 0, 1, 0],[0, 1, 0, 1]])
fatherAlleleCoding = np.array([0, 0, 1, 1])
motherAlleleCoding = np.array([0, 1, 0, 1])
# ! fm fm fm fm
# !segregationOrder: pp, pm, mp, mm
segregationTensor = np.zeros((4, 4, 4, 4))
for segregation in range(4):
if(segregation == 0) :
father = paternalTransmission
mother = paternalTransmission
if(segregation == 1) :
father = paternalTransmission
mother = maternalTransmission
if(segregation == 2) :
father = maternalTransmission
mother = paternalTransmission
if(segregation == 3) :
father = maternalTransmission
mother = maternalTransmission
# !alleles: aa, aA, Aa, AA
for allele in range(4) :
segregationTensor[:, :, allele, segregation] = np.outer(father[fatherAlleleCoding[allele]], mother[motherAlleleCoding[allele]])
if partial : segregationTensor = np.mean(segregationTensor, 3)
segregationTensor = segregationTensor*(1-e) + e/4 #trace has 4 times as many elements as it should since it has 4 internal reps.
segregationTensor = segregationTensor.astype(np.float32)
return(segregationTensor)
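# Usage sketch (illustrative, commented out; not the library's own call): the
# tensor is indexed as segregationTensor[father_state, mother_state,
# child_state, segregation_state], with genotype states ordered aa, aA, Aa, AA
# and segregation states ordered pp, pm, mp, mm (see the comments above). Given
# genotype probabilities for both parents and a distribution over segregation
# states at one locus, a child's genotype distribution could be assembled with
# an einsum; the uniform vectors below are placeholders.
#
# tensor = generateSegregation()
# father_probs = np.full(4, 0.25, dtype=np.float32)
# mother_probs = np.full(4, 0.25, dtype=np.float32)
# seg_probs = np.full(4, 0.25, dtype=np.float32)
# child_probs = np.einsum('fmcs,f,m,s->c', tensor, father_probs, mother_probs, seg_probs)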
# def generateErrorMat(error) :
# # errorMat = np.array([[1-error*3/4, error/4, error/4, error/4],
# # [error/4, .5-error/4, .5-error/4, error/4],
# # [error/4, error/4, error/4, 1-error*3/4]], dtype = np.float32)
# errorMat = np.array([[1-error*2/3, error/3, error/3, error/3],
# [error/3, 1-error*2/3, 1-error*2/3, error/3],
# [error/3, error/3, error/3, 1-error*2/3]], dtype = np.float32)
# errorMat = errorMat/np.sum(errorMat, 1)[:,None]
# return errorMat
## Not sure if below is ever used.
# def generateTransmission(error) :
# paternalTransmission = np.array([ [1-error, 1.-error, error, error],
# [error, error, 1-error, 1-error]])
# maternalTransmission = np.array([ [1.-error, error, 1.-error, error],
# [error, 1-error, error, 1-error]] )
# segregationTransmissionMatrix = np.zeros((4,4))
# segregationTransmissionMatrix[0,:] = paternalTransmission[0,:]
# segregationTransmissionMatrix[1,:] = paternalTransmission[0,:]
# segregationTransmissionMatrix[2,:] = paternalTransmission[1,:]
# segregationTransmissionMatrix[3,:] = paternalTransmission[1,:]
# segregationTransmissionMatrix[:,0] = segregationTransmissionMatrix[:,0] * maternalTransmission[0,:]
# segregationTransmissionMatrix[:,1] = segregationTransmissionMatrix[:,1] * maternalTransmission[1,:]
# segregationTransmissionMatrix[:,2] = segregationTransmissionMatrix[:,2] * maternalTransmission[0,:]
# segregationTransmissionMatrix[:,3] = segregationTransmissionMatrix[:,3] * maternalTransmission[1,:]
# segregationTransmissionMatrix = segregationTransmissionMatrix.astype(np.float32)
# return(segregationTransmissionMatrix)
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/ProbMath.py
|
ProbMath.py
|
from numba import jit
import numpy as np
from . import ProbMath
from . import NumbaUtils
from . HaplotypeLibrary import haplotype_from_indices
def diploidHMM(individual, paternal_haplotypes, maternal_haplotypes, error, recombination_rate, calling_method='dosages', use_called_haps=True, include_geno_probs=False):
n_loci = len(individual.genotypes)
# !!!! NEED TO MAKE SURE SOURCE HAPLOTYPES ARE ALL NON MISSING!!!
if type(paternal_haplotypes) is list or type(paternal_haplotypes) is tuple:
paternal_haplotypes = np.array(paternal_haplotypes)
if type(maternal_haplotypes) is list or type(maternal_haplotypes) is tuple:
maternal_haplotypes = np.array(maternal_haplotypes)
# Expand error and recombinationRate to arrays as may need to have
# marker specific error/recombination rates.
if type(error) is float:
error = np.full(n_loci, error, dtype=np.float32)
if type(recombination_rate) is float:
recombination_rate = np.full(n_loci, recombination_rate, dtype=np.float32)
# Construct penetrance values (point estimates)
if use_called_haps:
point_estimates = getDiploidPointEstimates(individual.genotypes, individual.haplotypes[0], individual.haplotypes[1], paternal_haplotypes, maternal_haplotypes, error)
elif calling_method in ('sample', 'dosages', 'Viterbi'):
n_pat = len(paternal_haplotypes)
n_mat = len(maternal_haplotypes)
point_estimates = np.ones((n_loci, n_pat, n_mat), dtype=np.float32)
getDiploidPointEstimates_geno(individual.genotypes, paternal_haplotypes, maternal_haplotypes, error, point_estimates)
else:
probs = ProbMath.getGenotypeProbabilities_ind(individual)
point_estimates = getDiploidPointEstimates_probs(probs, paternal_haplotypes, maternal_haplotypes, error)
# Do 'sample' and 'Viterbi' before other 'calling_method' as we don't need the forward-backward probs
if calling_method == 'sample':
haplotypes = getDiploidSample(point_estimates, recombination_rate, paternal_haplotypes, maternal_haplotypes)
individual.imputed_haplotypes = haplotypes
return
if calling_method == 'Viterbi':
haplotypes = get_viterbi(point_estimates, recombination_rate, paternal_haplotypes, maternal_haplotypes)
individual.imputed_haplotypes = haplotypes
return
# Run forward-backward algorithm on penetrance values
total_probs = diploidForwardBackward(point_estimates, recombination_rate)
if calling_method == 'dosages':
dosages = getDiploidDosages(total_probs, paternal_haplotypes, maternal_haplotypes)
individual.dosages = dosages
if calling_method == 'probabilities':
values = getDiploidProbabilities(total_probs, paternal_haplotypes, maternal_haplotypes)
individual.info = values
if calling_method == 'callhaps':
raise ValueError('callhaps not yet implemented.')
@jit(nopython=True)
def addDiploidPrior(pointEst, prior):
nPat, nMat, nLoci = pointEst.shape
for i in range(nLoci):
for j in range(nPat):
for k in range(nMat):
pointEst[j, k, i] *= prior[j, k]
@jit(nopython=True, nogil=True)
def getDiploidDosages(hapEst, paternalHaplotypes, maternalHaplotypes):
nPat, nLoci = paternalHaplotypes.shape
nMat, nLoci = maternalHaplotypes.shape
dosages = np.full(nLoci, 0, dtype=np.float32)
for i in range(nLoci):
for j in range(nPat):
for k in range(nMat):
dosages[i] += hapEst[i, j, k]*(paternalHaplotypes[j, i] + maternalHaplotypes[k, i])
return dosages
@jit(nopython=True)
def getDiploidProbabilities(hapEst, paternalHaplotypes, maternalHaplotypes):
nPat, nLoci = paternalHaplotypes.shape
nMat, nLoci = maternalHaplotypes.shape
probs = np.full((4, nLoci), 0, dtype = np.float32)
for i in range(nLoci):
for j in range(nPat):
for k in range(nMat):
if paternalHaplotypes[j, i] == 0 and maternalHaplotypes[k, i] == 0:
probs[0, i] += hapEst[j, k, i]
if paternalHaplotypes[j, i] == 0 and maternalHaplotypes[k, i] == 1:
probs[1, i] += hapEst[j, k, i]
if paternalHaplotypes[j, i] == 1 and maternalHaplotypes[k, i] == 0:
probs[2, i] += hapEst[j, k, i]
if paternalHaplotypes[j, i] == 1 and maternalHaplotypes[k, i] == 1:
probs[3, i] += hapEst[j, k, i]
return probs
@jit(nopython=True, nogil=True)
def getDiploidSample(point_estimate, recombination_rate, paternal_haps, maternal_haps):
"""Sample a pair of haplotypes"""
forward_probs = diploid_forward(point_estimate, recombination_rate, in_place=True)
haplotypes = diploidSampleHaplotypes(forward_probs, recombination_rate, paternal_haps, maternal_haps)
return haplotypes
@jit(nopython=True, nogil=True)
def get_viterbi(point_estimates, recombination_rate, paternal_haplotypes, maternal_haplotypes):
"""Get most likely haplotype pair using the Viterbi algorithm"""
n_loci = point_estimates.shape[0]
haplotypes = np.full((2, n_loci), 9, dtype=np.int8)
forward_probs = diploid_forward(point_estimates, recombination_rate)
paternal_indices, maternal_indices = diploid_viterbi(forward_probs, recombination_rate)
haplotypes[0] = haplotype_from_indices(paternal_indices, paternal_haplotypes)
haplotypes[1] = haplotype_from_indices(maternal_indices, maternal_haplotypes)
return haplotypes
@jit(nopython=True, nogil=True)
def getDiploidPointEstimates(indGeno, indPatHap, indMatHap, paternalHaplotypes, maternalHaplotypes, error):
nPat, nLoci = paternalHaplotypes.shape
nMat, nLoci = maternalHaplotypes.shape
pointEst = np.full((nLoci, nPat, nMat), 1, dtype=np.float32)
for i in range(nLoci):
if indGeno[i] != 9:
for j in range(nPat):
for k in range(nMat):
# Separate phased vs non-phased loci
if indPatHap[i] != 9 and indMatHap[i] != 9:
value = 1
if indPatHap[i] == paternalHaplotypes[j, i]:
value *= (1-error[i])
else:
value *= error[i]
if indMatHap[i] == maternalHaplotypes[k, i]:
value *= (1-error[i])
else:
value *= error[i]
pointEst[i, j, k] = value
else:
#I think this shouldn't be too horrible.
sourceGeno = paternalHaplotypes[j, i] + maternalHaplotypes[k, i]
if sourceGeno == indGeno[i]:
pointEst[i, j, k] = 1-error[i]*error[i]
else:
pointEst[i, j, k] = error[i]*error[i]
return pointEst
@jit(nopython=True, nogil=True)
def getDiploidPointEstimates_geno(indGeno, paternalHaplotypes, maternalHaplotypes, error, pointEst):
nPat, nLoci = paternalHaplotypes.shape
nMat, nLoci = maternalHaplotypes.shape
for i in range(nLoci):
if indGeno[i] != 9:
error_2 = error[i]*error[i]
for j in range(nPat):
for k in range(nMat):
#I think this shouldn't be too horrible.
sourceGeno = paternalHaplotypes[j, i] + maternalHaplotypes[k, i]
if sourceGeno == indGeno[i]:
pointEst[i, j, k] = 1-error_2
else:
pointEst[i, j, k] = error_2
@jit(nopython=True)
def getDiploidPointEstimates_probs(indProbs, paternalHaplotypes, maternalHaplotypes, error):
nPat, nLoci = paternalHaplotypes.shape
nMat, nLoci = maternalHaplotypes.shape
pointEst = np.full((nPat, nMat, nLoci), 1, dtype = np.float32)
for i in range(nLoci):
for j in range(nPat):
for k in range(nMat):
# I'm just going to be super explicit here.
p_aa = indProbs[0, i]
p_aA = indProbs[1, i]
p_Aa = indProbs[2, i]
p_AA = indProbs[3, i]
e = error[i]
if paternalHaplotypes[j,i] == 0 and maternalHaplotypes[k, i] == 0:
value = p_aa*(1-e)**2 + (p_aA + p_Aa)*e*(1-e) + p_AA*e**2
if paternalHaplotypes[j,i] == 1 and maternalHaplotypes[k, i] == 0:
value = p_Aa*(1-e)**2 + (p_aa + p_AA)*e*(1-e) + p_aA*e**2
if paternalHaplotypes[j,i] == 0 and maternalHaplotypes[k, i] == 1:
value = p_aA*(1-e)**2 + (p_aa + p_AA)*e*(1-e) + p_Aa*e**2
if paternalHaplotypes[j,i] == 1 and maternalHaplotypes[k, i] == 1:
value = p_AA*(1-e)**2 + (p_aA + p_Aa)*e*(1-e) + p_aa*e**2
pointEst[j,k,i] = value
return pointEst
@jit(nopython=True)
def diploid_normalize(array):
"""Normalize a 'diploid' probability array in place
The array should have shape: (# paternal haplotypes, # maternal haplotypes, # loci)
The function normalizes such that the values at each locus sum to 1
It's possible the accuracy could be improved by using a compensated summation algorithm, e.g.:
https://en.wikipedia.org/wiki/Kahan_summation_algorithm
As it stands, running np.sum(array, axis=(0,1)) on the output of this function with an array
of shape (200,200,1000) gives values that differ from 1 by ~1e-4
Note also that Numba doesn't support axis=(0,1) so we can't use that"""
n_pat, n_mat, n_loci = array.shape
for i in range(n_loci):
# Use a float64 accumulator to avoid losing precision
sum_ = np.float64(0)
for j in range(n_pat):
for k in range(n_mat):
sum_ += array[j, k, i]
for j in range(n_pat):
for k in range(n_mat):
array[j, k, i] = array[j, k, i]/sum_
@jit(nopython=True)
def transmit(previous_estimate, recombination_rate, output, pat, mat):
"""Transforms a probability distribution (over pairs of paternal and maternal haplotypes, at a single locus)
to a probability distribution at the next locus by accounting for emission probabilities (point_estimates)
and transition probabilities (recombination_rate)
This is a core step in the forward and backward algorithms
point_estimates probability distribution to be transmitted forward. Assume already normalized.
shape: (# paternal haplotypes, # maternal haplotypes)
recombination_rate recombination rate at this locus - scalar
output newly calculated probability distribution over pairs of haplotypes at *this* locus
shape: (# paternal haplotypes, # maternal haplotypes)
Note: previous and estimate are updated by this function"""
n_pat, n_mat = previous_estimate.shape
# Get haplotype specific recombinations
pat[:] = 0
mat[:] = 0
for j in range(n_pat):
for k in range(n_mat):
pat[j] += previous_estimate[j, k]
mat[k] += previous_estimate[j, k]
e = recombination_rate
e1e = (1-e)*e
e2m1 = (1-e)**2
# Adding modifications to pat and mat to take into account number of haplotypes and recombination rate.
pat *= e1e/n_pat
mat *= e1e/n_mat
e2 = e*e/(n_mat*n_pat)
# Account for recombinations
for j in range(n_pat):
for k in range(n_mat):
output[j, k] = previous_estimate[j, k]*e2m1 + pat[j] + mat[k] + e2
@jit(nopython=True, nogil=True)
def diploid_forward(point_estimate, recombination_rate, in_place=False):
"""Calculate forward probabilities combined with the point_estimates"""
n_loci, n_pat, n_mat = point_estimate.shape
if in_place:
combined = point_estimate
else:
combined = point_estimate.copy() # copy so that point_estimate is not modified
prev = np.full((n_pat, n_mat), 0.25, dtype=np.float32)
# Temporary numba variables.
forward_i = np.empty((n_pat, n_mat), dtype=np.float32)
tmp_pat = np.empty(n_pat, dtype=np.float32)
tmp_mat = np.empty(n_mat, dtype=np.float32)
# Make sure the first locus is normalized.
combined[0, :, :] /= np.sum(combined[0, :, :])
for i in range(1, n_loci):
# Update estimates at this locus
# Take the value at locus i-1 and transmit it forward.
transmit(combined[i-1, :, :], recombination_rate[i], forward_i, tmp_pat, tmp_mat)
# Combine the forward estimate at locus i with the point estimate at i.
# This is safe if in_place = True since we have not updated combined[i,:,:] yet and it will be still equal to point_estimate.
combined[i, :, :] = point_estimate[i, :, :] * forward_i
combined[i, :, :] /= np.sum(combined[i, :, :])
return combined
@jit(nopython=True, nogil=True)
def diploid_backward(point_estimate, recombination_rate):
"""Calculate backward probabilities"""
n_loci, n_pat, n_mat = point_estimate.shape
backward = np.ones_like(point_estimate, dtype=np.float32)
prev = np.full((n_pat, n_mat), 0.25, dtype=np.float32)
# Temporary numba variables.
combined_i = np.empty((n_pat, n_mat), dtype=np.float32)
tmp_pat = np.empty(n_pat, dtype=np.float32)
tmp_mat = np.empty(n_mat, dtype=np.float32)
for i in range(n_loci-2, -1, -1):
# Skip the first loci.
# Combine the backward estimate at i+1 with the point estimate at i+1 (unlike the forward pass, the backward estimate does not contain the point_estimate).
combined_i[:, :] = backward[i+1, :, :] * point_estimate[i+1, :, :]
combined_i[:, :] /= np.sum(combined_i)
# Transmit the combined value forward. This is the backward estimate.
transmit(combined_i, recombination_rate[i], backward[i, :, :], tmp_pat, tmp_mat)
return backward
@jit(nopython=True, nogil=True)
def diploidForwardBackward(point_estimate, recombination_rate):
"""Calculate state probabilities at each loci using the forward-backward algorithm"""
# We may want to split this out into something else.
est = diploid_backward(point_estimate, recombination_rate)
est *= diploid_forward(point_estimate, recombination_rate, in_place=True)
# Return normalized probabilities
n_loci = est.shape[0]
for i in range(n_loci):
est[i, :, :] /= np.sum(est[i, :, :])
return est
@jit(nopython=True, nogil=True)
def diploidSampleHaplotypes(forward_probs, recombination_rate, paternal_haplotypes, maternal_haplotypes):
"""Sample a pair of paternal and maternal haplotypes from the forward and backward probability distributions
and paternal and maternal haplotype libraries.
Returns:
haplotypes Pair of haplotypes as a 2D array of shape (2, n_loci)
"""
n_loci = forward_probs.shape[0]
haplotypes = np.full((2, n_loci), 9, dtype=np.int8)
paternal_indices, maternal_indices = diploidOneSample(forward_probs, recombination_rate)
haplotypes[0] = haplotype_from_indices(paternal_indices, paternal_haplotypes)
haplotypes[1] = haplotype_from_indices(maternal_indices, maternal_haplotypes)
return haplotypes
@jit(nopython=True)
def diploidOneSample(forward_probs, recombination_rate):
"""Sample a pair of paternal and maternal haplotypes from the forward and backward probability distributions
Returns:
paternal_indices, maternal_indices - arrays of sampled haplotype indices
A description of the sampling process would be nice here..."""
n_loci, n_pat, n_mat = forward_probs.shape
pvals = np.empty((n_pat, n_mat), dtype=np.float32) # sampled probability distribution at one locus
paternal_indices = np.empty(n_loci, dtype=np.int64)
maternal_indices = np.empty(n_loci, dtype=np.int64)
# Backwards algorithm
for i in range(n_loci-1, -1, -1): # zero indexed then minus one since we skip the boundary
# Sample at this locus
if i == n_loci-1:
pvals[:, :] = forward_probs[i, :, :]
else:
combine_backward_sampled_value(forward_probs[i, :, :], paternal_indices[i+1], maternal_indices[i+1], recombination_rate[i+1], pvals[:, :])
j, k = NumbaUtils.multinomial_sample_2d(pvals=pvals)
paternal_indices[i] = j
maternal_indices[i] = k
# Last sample (at the first locus)
j, k = NumbaUtils.multinomial_sample_2d(pvals=pvals)
paternal_indices[0] = j
maternal_indices[0] = k
return paternal_indices, maternal_indices
@jit(nopython=True, nogil=True)
def diploid_viterbi(forward_probs, recombination_rate):
"""Determine the most likely haplotype pair (paternal and maternal) using the Viterbi algorithm
Returns:
paternal_indices, maternal_indices - arrays of sampled haplotype indices"""
n_loci, n_pat, n_mat = forward_probs.shape
# pvals holds the probability distribution over haplotype pairs at one locus
pvals = np.empty((n_pat, n_mat), dtype=np.float32)
paternal_indices = np.empty(n_loci, dtype=np.int64)
maternal_indices = np.empty(n_loci, dtype=np.int64)
# Backwards algorithm
for i in range(n_loci-1, -1, -1): # zero indexed then minus one since we skip the boundary
# Sample at this locus
if i == n_loci-1:
pvals[:, :] = forward_probs[i, :, :]
else:
combine_backward_sampled_value(forward_probs[i, :, :], paternal_indices[i+1], maternal_indices[i+1], recombination_rate[i+1], pvals[:, :])
idx = np.argmax(pvals)
j, k = idx//n_mat, idx%n_mat
paternal_indices[i] = j
maternal_indices[i] = k
# Last sample (at the first locus)
idx = np.argmax(pvals)
j, k = idx//n_mat, idx%n_mat
paternal_indices[0] = j
maternal_indices[0] = k
return paternal_indices, maternal_indices
@jit(nopython=True)
def diploidIndices(sampled_probs):
"""Get paternal and maternal indices from sampled probability distribution
Intended to be used with sampled probabilities as returned from diploidOneSample()"""
n_loci, n_pat, n_mat = sampled_probs.shape
paternal_indices = np.empty(n_loci, dtype=np.int64)
maternal_indices = np.empty(n_loci, dtype=np.int64)
eps = np.finfo(np.float32).eps
for i in range(n_loci):
for j in range(n_pat):
for k in range(n_mat):
# If sampled_probs[j, k, i] == 1
# set indices array to values j and k
if sampled_probs[i, j, k] > 1-eps:
paternal_indices[i] = j
maternal_indices[i] = k
return paternal_indices, maternal_indices
@jit(nopython=True)
def combine_backward_sampled_value(previous_estimate, pat_hap, mat_hap, recombination_rate, output):
"""Includes information from the previous sampled locus into the estimate at the current sampled locus.
previous_estimate combination of the forward + point estimate at the locus.
shape: (# paternal haplotypes, # maternal haplotypes)
pat_hap, mat_hap Sampled paternal and maternal haplotypes -- integer
recombination_rate recombination rate at this locus - scalar
output newly calculated probability distribution over pairs of haplotypes at *this* locus
shape: (# paternal haplotypes, # maternal haplotypes)
Note: output is updated in place (and returned) by this function"""
n_pat, n_mat = previous_estimate.shape
e = recombination_rate
single_rec = (1-e)*e
no_rec = (1-e)**2
double_rec = e*e
# Haplotype is moving from pat_hap, mat_hap.
# Double recombination -- both haplotypes change.
output[:, :] = double_rec/(n_mat*n_pat)
# Maternal recombination -- pat_hap stays the same.
for k in range(n_mat):
output[pat_hap, k] += single_rec/n_mat
# Paternal recombination -- mat_hap stays the same.
for j in range(n_pat):
output[j, mat_hap] += single_rec/n_pat
# No recombinations -- both haplotypes stay the same.
output[pat_hap, mat_hap] += no_rec
# Add in forward_plus_combined
output *= previous_estimate
return output
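# Usage sketch (illustrative, commented out; not part of the module): running
# the diploid HMM for one individual against paternal and maternal haplotype
# panels. `ind` is a hypothetical individual object exposing .genotypes (and
# .haplotypes, .dosages) as used by diploidHMM above; the panels are arrays of
# shape (n_haplotypes, n_loci).
#
# pat_panel = np.array([[0, 0, 1, 1], [1, 1, 0, 0]], dtype=np.int8)
# mat_panel = np.array([[0, 1, 0, 1], [1, 0, 1, 0]], dtype=np.int8)
# diploidHMM(ind, pat_panel, mat_panel, error=0.01, recombination_rate=0.25,
#            calling_method='dosages', use_called_haps=False)
# # ind.dosages now holds the expected allele dosage at each locus.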
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/DiploidHMM.py
|
DiploidHMM.py
|
import random
import numpy as np
import numba
from numba import njit, jit, int8, int32,int64, boolean, deferred_type, optional, float32
from numba.experimental import jitclass
from collections import OrderedDict
try:
profile
except:
def profile(x):
return x
#####################################
### ###
### Burrows Wheeler ###
### ###
#####################################
class BurrowsWheelerLibrary():
def __init__(self, haplotypeList):
self.library = createBWLibrary(np.array(haplotypeList))
self.hapList = haplotypeList
self.nHaps = len(haplotypeList)
def getHaplotypeMatches(self, haplotype, start, stop):
nHaps, hapIndexes = getConsistentHaplotypes(self.library, haplotype, start, stop)
haps = [(self.hapList[hapIndexes[index, 0]], hapIndexes[index, 1]) for index in range(nHaps)]
return haps
@profile
def getBestHaplotype(self, weights, start, stop):
index = getHaplotypesPlusWeights(self.library, weights, start, stop)
return self.hapList[index][start:stop]
jit_BurrowsWheelerLibrary_spec = OrderedDict()
jit_BurrowsWheelerLibrary_spec['a'] = int64[:,:]
jit_BurrowsWheelerLibrary_spec['d'] = int64[:,:]
jit_BurrowsWheelerLibrary_spec['zeroOccPrev'] = int64[:,:]
jit_BurrowsWheelerLibrary_spec['nZeros'] = int64[:]
jit_BurrowsWheelerLibrary_spec['haps'] = int8[:,:]
@jitclass(jit_BurrowsWheelerLibrary_spec)
class jit_BurrowsWheelerLibrary():
def __init__(self, a, d, nZeros, zeroOccPrev, haps):
self.a = a
self.d = d
self.nZeros = nZeros
self.zeroOccPrev = zeroOccPrev
self.haps = haps
def getValues(self):
return (self.a, self.d, self.nZeros, self.zeroOccPrev, self.haps)
def update_state(self, state, index):
pass
def get_null_state(self, value, index):
if value == 0:
lowerR = 0
upperR = nZeros[stop-1]
if value == 1:
lowerR = nZeros[stop-1]
upperR = nHaps
pass
@njit
def createBWLibrary(haps):
#Definitions.
# haps : a list of haplotypes
# a : an ordering of haps in lexographic order.
# d : Number of loci of a[i,j+k] == a[i,-1, j+k]
nHaps = haps.shape[0]
nLoci = haps.shape[1]
a = np.full(haps.shape, 0, dtype = np.int64)
d = np.full(haps.shape, 0, dtype = np.int64)
nZerosArray = np.full(nLoci, 0, dtype = np.int64)
zeros = np.full(nHaps, 0, dtype = np.int64)
ones = np.full(nHaps, 0, dtype = np.int64)
dZeros = np.full(nHaps, 0, dtype = np.int64)
dOnes = np.full(nHaps, 0, dtype = np.int64)
nZeros = 0
nOnes = 0
for j in range(nHaps):
if haps[j, nLoci-1] == 0:
zeros[nZeros] = j
if nZeros == 0:
dZeros[nZeros] = 0
else:
dZeros[nZeros] = 1
nZeros += 1
else:
ones[nOnes] = j
if nOnes == 0:
dOnes[nOnes] = 0
else:
dOnes[nOnes] = 1
nOnes += 1
if nZeros > 0:
a[0:nZeros, nLoci-1] = zeros[0:nZeros]
d[0:nZeros, nLoci-1] = dZeros[0:nZeros]
if nOnes > 0:
a[nZeros:nHaps, nLoci-1] = ones[0:nOnes]
d[nZeros:nHaps, nLoci-1] = dOnes[0:nOnes]
nZerosArray[nLoci-1] = nZeros
for i in range(nLoci-2, -1, -1) :
zeros = np.full(nHaps, 0, dtype = np.int64)
ones = np.full(nHaps, 0, dtype = np.int64)
dZeros = np.full(nHaps, 0, dtype = np.int64)
dOnes = np.full(nHaps, 0, dtype = np.int64)
nZeros = 0
nOnes = 0
dZerosTmp = -1 #This is a hack.
dOnesTmp = -1
for j in range(nHaps) :
dZerosTmp = min(dZerosTmp, d[j,i+1])
dOnesTmp = min(dOnesTmp, d[j,i+1])
if haps[a[j, i+1], i] == 0:
zeros[nZeros] = a[j, i+1]
dZeros[nZeros] = dZerosTmp + 1
nZeros += 1
dZerosTmp = nLoci
else:
ones[nOnes] = a[j, i+1]
dOnes[nOnes] = dOnesTmp + 1
nOnes += 1
dOnesTmp = nLoci
if nZeros > 0:
a[0:nZeros, i] = zeros[0:nZeros]
d[0:nZeros, i] = dZeros[0:nZeros]
if nOnes > 0:
a[nZeros:nHaps, i] = ones[0:nOnes]
d[nZeros:nHaps, i] = dOnes[0:nOnes]
nZerosArray[i] = nZeros
#I'm going to be a wee bit sloppy in creating zeroOccPrev
#Not defined at 0 so start at 1.
zeroOccPrev = np.full(haps.shape, 0, dtype = np.int64)
for i in range(1, nLoci):
count = 0
for j in range(0, nHaps):
if haps[a[j, i], i-1] == 0:
count += 1
zeroOccPrev[j, i] = count
library = jit_BurrowsWheelerLibrary(a, d, nZerosArray, zeroOccPrev, haps)
return library
@jit(nopython=True, nogil=True)
def getConsistentHaplotypes(bwLibrary, hap, start, stop):
a, d, nZeros, zeroOccPrev, haps = bwLibrary.getValues()
nHaps = a.shape[0]
nLoci = a.shape[1]
intervals = np.full((nHaps, 2), 0, dtype = np.int64)
intervals_new = np.full((nHaps, 2), 0, dtype = np.int64)
nIntervals = 0
nIntervals_new = 0
#Haps go from 0 to nHaps-1. Loci go from start to stop-1 (inclusive).
#The first hap with one is nZeros. The last hap with zero is nZeros -1.
#Last loci is stop -1
#These are split out because they represent *distinct* haplotypes.
#Maybe could do this with tuple and list append but *shrug*.
if hap[stop-1] == 0 or hap[stop-1] == 9:
lowerR = 0
upperR = nZeros[stop-1]
if upperR >= lowerR:
intervals[nIntervals, 0] = lowerR
intervals[nIntervals, 1] = upperR
nIntervals += 1
if hap[stop-1] == 1 or hap[stop-1] == 9:
lowerR = nZeros[stop-1]
upperR = nHaps
if upperR >= lowerR:
intervals[nIntervals, 0] = lowerR
intervals[nIntervals, 1] = upperR
nIntervals += 1
#Python indexing is annoying.
#Exclude stop and stop-1, include start.
#Intervals are over haplotypes.
for i in range(stop-2, start-1, -1):
# print(intervals[0:nIntervals,:])
nIntervals_new = 0
#Doing it on interval seems to make marginally more sense.
for interval in range(nIntervals):
int_start = intervals[interval, 0]
int_end = intervals[interval, 1]
if hap[i] == 0 or hap[i] == 9:
if int_start == 0:
lowerR = 0
else:
lowerR = zeroOccPrev[int_start-1, i+1]
upperR = zeroOccPrev[int_end-1, i+1] #Number of zeros in the region.
if upperR > lowerR: #Needs to be greater than. Regions no longer inclusive.
# print("Added 0:", int_start, int_end, "->>", lowerR, upperR)
intervals_new[nIntervals_new, 0] = lowerR
intervals_new[nIntervals_new, 1] = upperR
nIntervals_new += 1
if hap[i] == 1 or hap[i] == 9:
# of ones between 0 and k (k inclusive) is k+1 - number of zeros.
if int_start == 0:
lowerR = nZeros[i]
else:
lowerR = nZeros[i] + (int_start - zeroOccPrev[int_start-1, i+1])
upperR = nZeros[i] + (int_end - zeroOccPrev[int_end-1, i+1])
if upperR > lowerR:
# print("Added 1:", int_start, int_end, "->>", lowerR, upperR)
intervals_new[nIntervals_new, 0] = lowerR
intervals_new[nIntervals_new, 1] = upperR
nIntervals_new += 1
# else:
# print(i, "interval rejected:", int_start, int_end, "->", upperR, lowerR)
#This is basically intervals = intervals_new
for j in range(nIntervals_new):
intervals[j, 0] = intervals_new[j, 0]
intervals[j, 1] = intervals_new[j, 1]
nIntervals = nIntervals_new
# print("Finished", i, "->", nIntervals)
# print(intervals[0:nIntervals,:])
hapIndexes = np.full((nHaps, 2), 0, dtype = np.int64)
nHapsAssigned = 0
for i in range(nIntervals):
int_start = intervals[i, 0]
int_end = intervals[i, 1]
hapIndexes[nHapsAssigned, 0] = a[int_start,start]
hapIndexes[nHapsAssigned, 1] = int_end - int_start
nHapsAssigned +=1
return (nHapsAssigned, hapIndexes)
# def printSortAt(loci, library):
# a, d, nZeros, zeroOccPrev, haps = library.getValues()
# vals = haps[a[:,loci],:]
# for i in range(vals.shape[0]):
# print(i, ' '.join(map(str, vals[i,:])) )
# # print(zeroOccPrev[:,:])
# hapLib = [np.array([1, 0, 0, 0, 0, 0, 1], dtype = np.int8),
# np.array([0, 1, 0, 0, 0, 1, 0], dtype = np.int8),
# np.array([0, 1, 0, 0, 0, 1, 0], dtype = np.int8),
# np.array([0, 1, 0, 0, 0, 1, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([1, 1, 1, 0, 0, 0, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([1, 1, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([0, 0, 0, 1, 0, 0, 0], dtype = np.int8),
# np.array([0, 1, 1, 1, 0, 0, 0], dtype = np.int8),
# np.array([0, 1, 1, 1, 0, 0, 0], dtype = np.int8),
# np.array([1, 1, 0, 1, 0, 0, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8),
# np.array([0, 0, 1, 0, 1, 0, 0], dtype = np.int8)]
# bwlib = BurrowsWheelerLibrary(hapLib)
# # printSortAt(0, bwlib.library)
# printSortAt(6, bwlib.library); print("")
# printSortAt(5, bwlib.library); print("")
# printSortAt(4, bwlib.library); print("")
# printSortAt(3, bwlib.library); print("")
# printSortAt(2, bwlib.library); print("")
# printSortAt(1, bwlib.library); print("")
# printSortAt(0, bwlib.library); print("")
# # print(bwlib.getHaplotypeMatches(haplotype = np.array([0, 0, 0], dtype = np.int8), start = 0, stop = 3))
# tmp = (bwlib.getHaplotypeMatches(haplotype = np.array([9, 9, 9, 9, 9, 9, 9], dtype = np.int8), start = 0, stop = 7))
# for key, value in tmp:
# print(key, value)
@njit
def getConsistentHaplotypesBruteForce(bwLibrary, hap, start, stop):
hap = hap[start:stop]
a, d, nZeros, zeroOccPrev, haps = bwLibrary.getValues()
recodedHaps = haps[:, start:stop]
nHaps = recodedHaps.shape[0]
nLoci = recodedHaps.shape[1]
consistent = np.full(nHaps, 0, dtype = np.int64)
#Last correct index
lastIndex = -1
for j in range(nHaps):
#Check every locus: any mismatch at a non-missing locus makes this haplotype inconsistent.
add = True
for i in range(nLoci):
if hap[i] != 9 :
if recodedHaps[j, i] != hap[i]:
add = False
if add:
consistent[j] = 1
hapIndexes = np.full((nHaps, 2), 0, dtype = np.int64)
nHapsAssigned = 0
for i in range(nHaps):
if consistent[i] > 0:
# hapIndexes[nHapsAssigned, 0] = a[i,start]
hapIndexes[nHapsAssigned, 0] = i
hapIndexes[nHapsAssigned, 1] = consistent[i]
nHapsAssigned +=1
return (nHapsAssigned, hapIndexes)
@njit
def getHaplotypesPlusWeights(bwLibrary, weights, start, stop):
#Weights are weights to the original haplotypes (haps)
a, d, nZeros, zeroOccPrev, haps = bwLibrary.getValues()
recodedWeights = weights[a[:, start]]
nHaps = d.shape[0]
nLoci = stop - start
bestLoci = -1
bestWeight = -1
currentLoci = 0
currentWeight = 0
for j in range(nHaps):
#Will need to double check this. This code will be slow!
if d[j, start] < nLoci:
#Process the last haplotype before moving on.
if currentWeight > bestWeight :
bestLoci = currentLoci
bestWeight = currentWeight
currentLoci = j
currentWeight = 0
currentWeight += recodedWeights[j]
#Make sure we check the last haplotype.
if currentWeight > bestWeight :
bestLoci = currentLoci
bestWeight = currentWeight
#REMEMBER TO RECODE
return a[bestLoci, start]
#Older version that doesn't use all of the meta data we have.
# @jit(nopython=True, nogil=True)
# def getConsistentHaplotypes(bwLibrary, hap, start, stop):
# a, d, nZeros, zeroOccPrev, haps = bwLibrary.getValues()
# hap = hap[start:stop]
# recodedHaps = haps[a[:, start], start:stop]
# nHaps = recodedHaps.shape[0]
# nLoci = recodedHaps.shape[1]
# consistent = np.full(nHaps, 0, dtype = np.int64)
# lastCorrect = -1
# firstError = nLoci + 1
# #Last correct index
# lastIndex = -1
# for j in range(nHaps):
# #Basically, we know how much overlap there was with the previous haplotype.
# #We can use that to get a better estimate of where this one will be correct.
# #By definition, all of 0 -> d[j, start]-1 inclusive is the same.
# #All of 0 -> lastCorrect (inclusive) is correct.
# #First error is the position of the first error. If firstError < nLoci, this is a problem. (nLoci is out of our bounds)
# lastCorrect = min(lastCorrect, d[j, start]-1)
# if firstError > d[j,start]-1: firstError = nLoci
# #Two elif statements.
# #First can we spot an error?
# #Second if no error's exist, are we sure that the entire haplotype is right.
# if firstError < nLoci: #or equal?
# consistent[j] = 0
# lastIndex = -1
# #nLoci is the last position we care about (nLoci is out of our bounds). If nLoci is correct, then we're good).
# elif lastCorrect >= nLoci-1:
# #Adding in some short circuit code to prevent duplication
# # lastIndex = -1 ###THIS LINE TOGGLES DUPLICATION PREVENTION
# if lastIndex != -1:
# consistent[lastIndex] += 1
# else:
# consistent[j] = 1
# lastIndex = j
# else:
# #Otherwise, we have not enough information and need to search.
# #Last correct is the last known correct loci.
# stopIters = False
# i = lastCorrect
# while not stopIters:
# i = i+1 #We know that the last correct loci is correct. So we're safe to start at last correct +1
# if hap[i] != 9 and recodedHaps[j, i] != hap[i]:
# lastCorrect = i-1
# firstError = i
# stopIters = True
# elif i == nLoci-1:
# stopIters = True
# lastCorrect = nLoci-1
# firstError = nLoci
# if firstError < nLoci:
# consistent[j] = 0
# lastIndex = -1
# elif lastCorrect >= nLoci-1: #This will probably be nLoci-1 since that is where our search stops.
# consistent[j] = 1
# lastIndex = j
# hapIndexes = np.full((nHaps, 2), 0, dtype = np.int64)
# nHapsAssigned = 0
# for i in range(nHaps):
# if consistent[i] > 0:
# hapIndexes[nHapsAssigned, 0] = a[i,start]
# hapIndexes[nHapsAssigned, 1] = consistent[i]
# nHapsAssigned +=1
# return (nHapsAssigned, hapIndexes)
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/BurrowsWheelerLibrary.py
|
BurrowsWheelerLibrary.py
|
import pickle
import random
import numpy as np
from numba import njit, jit
from numba.experimental import jitclass
def profile(x):
return x
# Helper functions
def slices(start, length, n):
"""Return `n` slices starting at `start` of length `length`"""
return [slice(i, i+length) for i in range(start, length*n + start, length)]
def bin_slices(l, n):
"""Return a list of slice() objects that split l items into n bins
The first l%n bins are length l//n+1; the remaining n-l%n bins are length l//n
Similar to np.array_split()"""
return slices(0, l//n+1, l%n) + slices((l//n+1)*(l%n), l//n, n-l%n)
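# A small illustrative sketch (hypothetical values) of what these two helpers return:
# slices(0, 3, 2)    # -> [slice(0, 3), slice(3, 6)]
# bin_slices(7, 3)   # -> [slice(0, 3), slice(3, 5), slice(5, 7)]
# # 7 loci over 3 bins: the first 7%3 = 1 bin has length 7//3+1 = 3, the remaining bins length 2.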
def topk_indices(genotype, haplotypes, n_topk):
"""Return top-k haplotype indices with fewest opposite homozygous markers compared to genotype
The array of number of opposite homozygous loci for each haplotype (`opposite_homozygous`)
is first shuffled so that tied values are effectively randomly sampled"""
# Mask of homozygous loci in the genotype
homozygous_mask = (genotype == 0) | (genotype == 2) # note: this purposefully ignores missing loci
# Select just these homozygous loci
g = genotype[homozygous_mask]
h = haplotypes[:, homozygous_mask]
# Number of opposite homozygous loci for all haplotypes
opposite_homozygous = np.sum(g//2 != h, axis=1)
# Shuffle indices
indices = np.arange(len(opposite_homozygous))
np.random.shuffle(indices)
# Stable argsort on the shuffled values
args = np.argsort(opposite_homozygous[indices], kind='stable')
# Top-k indices (fewest opposite homozygous loci)
return indices[args[:n_topk]]
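# A worked sketch with a made-up genotype and haplotype matrix:
# genotype = np.array([0, 1, 2, 9], dtype=np.int8)        # 9 = missing
# haplotypes = np.array([[0, 0, 1, 1],
#                        [1, 1, 0, 0],
#                        [0, 1, 1, 0]], dtype=np.int8)
# # Homozygous loci are positions 0 and 2; opposite-homozygous counts are [0, 2, 0], so
# # topk_indices(genotype, haplotypes, n_topk=2) returns indices {0, 2} in a random order.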
class HaplotypeLibrary(object):
"""A library of haplotypes
Each haplotype can have an identifier (any Python object, but typically a str or int)
The identifiers are used to select haplotypes for updating or masking
Haplotypes should be NumPy arrays of dtype np.int8
Some functions only work on frozen libraries; some only on unfrozen ones.
Use freeze() and unfreeze() to swap between the two states. Typically a library is
built with append() and then frozen to enable additional functionality"""
def __init__(self, n_loci=None):
self._n_loci = n_loci
self._frozen = False
self._haplotypes = []
self._identifiers = [] # index to identifier mapping
self.dtype = None
def __repr__(self):
return repr(self._identifiers) + '\n' + repr(self._haplotypes)
def __len__(self):
"""Number of haplotypes in the library"""
return len(self._haplotypes)
def __iter__(self):
"""Iterate over tuple of (id, haplotype)"""
for i in range(len(self)):
yield self._identifiers[i], self._haplotypes[i]
def append(self, haplotype, identifier=None):
"""Append a single haplotype to the library.
Note: a copy of the haplotype is taken"""
if self._frozen:
raise RuntimeError('Cannot append to frozen library')
if self.dtype is None:
self.dtype = haplotype.dtype
if self._n_loci is None:
self._n_loci = len(haplotype)
self._check_haplotype(haplotype, expected_shape=(self._n_loci,))
self._identifiers.append(identifier)
self._haplotypes.append(haplotype.copy())
def freeze(self):
"""Freeze the library: convert identifier and haplotype lists to NumPy arrays"""
if self._frozen:
raise RuntimeError('Cannot freeze an already frozen library')
self._haplotypes = np.array(self._haplotypes)
self._identifiers = np.array(self._identifiers)
self._frozen = True
def unfreeze(self):
"""Unfreeze the library: convert identifiers and haplotypes to lists"""
if not self._frozen:
raise RuntimeError('Cannot unfreeze an unfrozen library')
self._haplotypes = list(self._haplotypes)
self._identifiers = list(self._identifiers)
self._frozen = False
def update(self, haplotypes, identifier):
"""Update identifier's haplotypes
'haplotypes' can be a 1d array of loci or a 2d array of shape(#haps, #loci)"""
if not self._frozen:
raise RuntimeError('Cannot update an unfrozen library')
self._check_haplotype_dtype(haplotypes)
indices = self._indices(identifier)
# Use Numpy's broadcasting checks to handle mismatch of shape in the following:
self._haplotypes[indices] = haplotypes
def exclude_identifiers(self, identifiers):
"""Return a NumPy array of haplotypes excluding specified identifiers
'identifiers' can be a single identifier or iterable of identifiers"""
if not self._frozen:
raise RuntimeError('Cannot exclude from an unfrozen library')
mask = ~np.isin(self._identifiers, identifiers)
return self._haplotypes[mask]
def sample_only_identifiers(self, identifiers):
"""Returns HaplotypeLibrary object of haplotypes only including specified identifiers
'identifiers' can be a single identifier or iterable of identifiers"""
if not self._frozen:
raise RuntimeError('Cannot exclude from an unfrozen library')
mask = np.isin(self._identifiers, identifiers)
return self._sampled_library(mask)
def sample(self, n_haplotypes):
"""Return a NumPy array of randomly sampled haplotypes"""
if not self._frozen:
raise RuntimeError('Cannot sample from an unfrozen library')
if n_haplotypes > len(self):
n_haplotypes = len(self)
sampled_indices = np.sort(np.random.choice(len(self), size=n_haplotypes, replace=False))
return self._haplotypes[sampled_indices]
def sample_targeted(self, n_haplotypes, genotype, n_bins, exclude_identifiers=None):
"""Sample haplotypes that 'closely match' genotype `genotype`"""
if not self._frozen:
raise RuntimeError('Cannot sample from an unfrozen library')
if n_haplotypes > len(self):
return self
# Get top-k in a number of marker bins
n_topk = n_haplotypes # unnecessary variable redefinition
indices = np.empty((n_topk, n_bins), dtype=np.int64)
for i, s in enumerate(bin_slices(self._n_loci, n_bins)):
indices[:, i] = topk_indices(genotype[s], self._haplotypes[:, s], n_topk)
# Get top n_haplotypes across the bins, excluding any in exclude_identifiers
sampled_indices = set()
exclude_indices = set(self._indices(exclude_identifiers))
for idx in indices.flatten():
if idx not in exclude_indices:
sampled_indices.add(idx)
if len(sampled_indices) >= n_topk:
break
sampled_indices = list(sampled_indices)
# Return HaplotypeLibrary object
return self._sampled_library(sampled_indices)
def exclude_identifiers_and_sample(self, identifiers, n_haplotypes):
"""Return a NumPy array of (n_haplotypes) randomly sampled haplotypes
excluding specified identifiers.
'identifiers' can be a single identifier or an iterable of identifiers
Note: A copy of the haplotypes is created because of fancy indexing"""
# Exclude specified identifiers
if not self._frozen:
raise RuntimeError('Cannot sample or exclude from an unfrozen library')
exclude_mask = ~np.isin(self._identifiers, identifiers)
n_remaining_haplotypes = exclude_mask.sum()
# Generate random sample
if n_haplotypes > n_remaining_haplotypes:
n_haplotypes = n_remaining_haplotypes
sampled_indices = np.random.choice(n_remaining_haplotypes, size=n_haplotypes, replace=False)
sampled_indices.sort()
# Return HaplotypeLibrary object
return self._sampled_library(sampled_indices)
def asMatrix(self):
"""Return the NumPy array - kept for backwards compatibility"""
if self._frozen:
return self._haplotypes.copy()
return np.array(self._haplotypes)
def removeMissingValues(self):
"""Replace missing values randomly with 0 or 1 with 50 % probability
kept for backwards compatibility"""
for hap in self._haplotypes:
removeMissingValues(hap)
def get_called_haplotypes(self, threshold = 0.99):
"""Return "called" haplotypes -- these are haplotypes which only contain integer values (0,1,9).
For haplotypes where there is uncertainty, a threshold is used to determine whether the value is called as a value or is missing. """
if not self._frozen:
self.freeze()
if self.dtype == np.int8: # compare dtypes by equality; 'is' would always be False for a NumPy dtype object
return self._haplotypes
else:
called_haplotypes = np.full(self._haplotypes.shape, 0, dtype = np.float32)
for i in range(called_haplotypes.shape[0]):
called_haplotypes[i,:] = self.call_haplotypes(self._haplotypes[i,:], threshold)
return called_haplotypes
@staticmethod
@jit(nopython=True)
def call_haplotypes(hap, threshold):
nLoci = len(hap)
output = np.full(nLoci, 9, dtype = np.int8)
for i in range(nLoci):
if hap[i] <= 1 :
if hap[i] > threshold : output[i] = 1
if hap[i] < 1-threshold : output[i] = 0
return output
def get_haplotypes(self):
if not self._frozen:
self.freeze()
return self._haplotypes
def get_identifiers(self):
"""Get haplotype identifiers"""
return list(self._identifiers)
def _indices(self, identifier):
"""Get row indices associated with an identifier. These can be used for fancy indexing"""
# Return empty list if identifier == None
if not identifier:
return list()
if not self._frozen:
raise RuntimeError("Cannot get indices from an unfrozen library")
if identifier not in self._identifiers:
raise KeyError(f"Identifier '{identifier}' not in library")
return np.flatnonzero(self._identifiers == identifier).tolist()
def _check_haplotype_dtype(self, haplotype):
"""Check the haplotype has expected dtype"""
if haplotype.dtype != self.dtype:
raise TypeError(f'haplotype(s) dtype does not match library dtype, {self.dtype}')
def _check_haplotype(self, haplotype, expected_shape):
"""Check haplotype has expected shape and dtype.
Could extend to check values in {0,1,9}"""
self._check_haplotype_dtype(haplotype)
if haplotype.shape != expected_shape:
raise ValueError('haplotype(s) has unexpected shape')
def _sampled_library(self, indices):
"""Create a 'duplicated' HaplotypeLibrary consisting of specified indices only"""
# Create HaplotypeLibrary object to return
library = HaplotypeLibrary(self._n_loci)
library.dtype = self.dtype
library._haplotypes = self._haplotypes[indices]
library._identifiers = self._identifiers[indices]
library.freeze()
return library
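# A minimal usage sketch of the class above, with hypothetical identifiers; kept as a
# commented example in the style of the other example blocks in this file:
# lib = HaplotypeLibrary(n_loci=4)
# lib.append(np.array([0, 1, 0, 1], dtype=np.int8), identifier='ind1')
# lib.append(np.array([1, 1, 0, 0], dtype=np.int8), identifier='ind2')
# lib.freeze()                                # freezing enables sampling/indexing
# sampled = lib.sample(1)                     # 2D array holding one randomly chosen haplotype
# others = lib.exclude_identifiers('ind1')    # all haplotypes except ind1's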
@jit(nopython=True)
def haplotype_from_indices(indices, haplotype_library):
"""Helper function that takes an array of indices (for each locus) that 'point' to rows
in a haplotype library (NumPy array) and extracts the alleles from the corresponding haplotypes
(in the library)
Returns: a haplotype array of length n_loci"""
n_loci = len(indices)
haplotype = np.empty(n_loci, dtype=np.int8)
for col_idx in range(n_loci):
row_idx = indices[col_idx]
haplotype[col_idx] = haplotype_library[row_idx, col_idx]
return haplotype
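# Illustration with made-up inputs: `indices` selects, per locus, which library row
# supplies the allele.
# library = np.array([[0, 0, 0, 0],
#                     [1, 1, 1, 1]], dtype=np.int8)
# idx = np.array([0, 0, 1, 1], dtype=np.int64)
# haplotype_from_indices(idx, library)        # -> array([0, 0, 1, 1], dtype=int8)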
@njit
def removeMissingValues(hap):
for i in range(len(hap)) :
if hap[i] == 9:
hap[i] = random.randint(0, 1)
class ErrorLibrary(object):
@profile
def __init__(self, hap, haplotypes):
self.hap = hap
self.hapLib = haplotypes
self.errors = jit_assessErrors(hap, self.hapLib)
def getWindowValue(self, k):
return jit_getWindowValue(self.errors, k, self.hap)
@jit(nopython=True)
def jit_getWindowValue(errors, k, hap) :
window = np.full(errors.shape, 0, dtype = np.int8)
nHaps, nLoci = errors.shape
#Initialise the first window column with the error counts over loci 0..k.
for i in range(k+1):
window[:,0] += errors[:,i]
for i in range(1, nLoci):
window[:,i] = window[:,i-1]
if i > k:
if hap[i-k-1] != 9:
window[:,i] -= errors[:,i-k-1] #This is no longer in the window.
if i < (nLoci-k):
if hap[i+k] != 9:
window[:,i] += errors[:,i+k] #This is now included in the window.
return window
@jit(nopython=True)
def jit_assessErrors(hap, haps):
errors = np.full(haps.shape, 0, dtype = np.int8)
nHaps, nLoci = haps.shape
for i in range(nLoci):
if hap[i] != 9:
if hap[i] == 0:
errors[:, i] = haps[:,i]
if hap[i] == 1:
errors[:, i] = 1-haps[:,i]
return errors
from collections import OrderedDict
class HaplotypeDict(object):
def __init__(self):
self.nHaps = 0
self.haps = []
self.tree = dict()
# @profile
def append(self, haplotype):
byteVal = haplotype.tobytes()
if byteVal in self.tree:
return self.tree[byteVal]
else:
self.tree[byteVal] = self.nHaps
self.haps.append(haplotype)
self.nHaps += 1
return self.nHaps - 1
def get(self, index):
return self.haps[index]
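# Usage sketch (hypothetical values): identical haplotypes have identical byte
# representations, so append() returns the same index and the dict de-duplicates.
# hd = HaplotypeDict()
# h = np.array([0, 1, 1], dtype=np.int8)
# first = hd.append(h)          # 0 on first insertion
# second = hd.append(h.copy())  # still 0: same bytes, so the stored index is reused
# hd.get(first)                 # returns the stored haplotype array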
# hap = np.array([0, 0, 0, 0, 0, 0, 0, 0])
# hapLib = [np.array([1, 0, 0, 0, 0, 0, 1]),
# np.array([0, 1, 0, 0, 0, 1, 0]),
# np.array([0, 0, 1, 0, 1, 0, 0]),
# np.array([0, 0, 0, 1, 0, 0, 0]),
# np.array([0, 0, 1, 0, 1, 0, 0])]
# aa = ErrorLibrary(hap, hapLib)
# print(aa.errors)
# print(aa.getWindowValue(2))
# node_type = deferred_type()
# jit_randomBinary_spec = OrderedDict()
# jit_randomBinary_spec['array'] = int64[:]
# jit_randomBinary_spec['index'] = int64
# jit_randomBinary_spec['nItems'] = int64
# @jitclass(jit_randomBinary_spec)
# class jit_RandomBinary(object):
# def __init__(self, nItems):
# self.index = 0
# self.nItems = nItems
# self.array = np.random.randint(2, size = nItems)
# def next():
# self.index += 1
# if self.index == self.nItems:
# self.array = np.random.randint(2, size = nItems)
# self.index = 0
# return self.array[self.index]
# I Don't think this is used any more.
# def getCores(nLoci, lengths, offsets = [0]) :
# nCores = len(lengths)*len(offsets)
# startStop = []
# for length in lengths:
# for offset in offsets:
# finished = False
# if offset > 0:
# start = 0
# stop = min(offset, nLoci)
# startStop.append((start, stop))
# if stop == nLoci: finished = True
# else:
# stop = 0
# while not finished:
# start = stop
# stop = min(stop + length, nLoci)
# startStop.append((start, stop))
# if stop == nLoci: finished = True
# return startStop
# from collections import OrderedDict
# class HaplotypeCount_dict(object):
# def __init__(self):
# self.nHaps = 0
# self.haps = []
# self.tree = OrderedDict()
# # @profile
# @profile
# def append(self, haplotype, score = 1):
# byteVal = haplotype.tobytes()
# if byteVal in self.tree:
# self.tree[byteVal] += score
# else:
# self.tree[byteVal] = score
# self.haps.append(haplotype)
# self.nHaps += 1
# def getLargest(self):
# #This is sloppy
# vals = [value for key, value in self.tree.items()]
# index = np.argmax(vals)
# return self.haps[index]
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/HaplotypeLibrary.py
|
HaplotypeLibrary.py
|
from numba import jit
import numpy as np
from . import NumbaUtils
from . HaplotypeLibrary import haplotype_from_indices
def haploidHMM(individual, source_haplotypes, error, recombination_rate, threshold=0.9, calling_method='dosages'):
target_haplotype = individual.haplotypes
n_loci = len(target_haplotype)
# !!!! May need to cast the source Haplotypes to a matrix. #May also want to handle the probabilistic case.
if type(source_haplotypes) is list:
source_haplotypes = np.array(source_haplotypes)
# Expand error and recombinationRate to arrays as may need to have
# marker specific error/recombination rates.
if type(error) is float:
error = np.full(n_loci, error, dtype=np.float32)
if type(recombination_rate) is float:
recombination_rate = np.full(n_loci, recombination_rate, dtype=np.float32)
# Construct penetrance values (point estimates)
point_estimates = getHaploidPointEstimates(target_haplotype, source_haplotypes, error)
# Run forward-backward algorithm on penetrance values
# Note: don't need these probabilities if using the sample method
if calling_method != 'sample':
total_probs = haploidForwardBackward(point_estimates, recombination_rate)
# Handle the different calling methods
if calling_method == 'callhaps':
# Call haplotypes
called_haps = haploidCallHaps(total_probs, threshold)
# Call genotypes
called_genotypes = getHaploidGenotypes(called_haps, source_haplotypes)
return called_genotypes
if calling_method == 'dosages':
dosages = getHaploidDosages(total_probs, source_haplotypes)
individual.dosages = dosages
if calling_method == 'sample':
haplotype = getHaploidSample(point_estimates, recombination_rate, source_haplotypes)
individual.imputed_haplotypes = haplotype
if calling_method == 'Viterbi':
haplotype = get_viterbi(point_estimates, recombination_rate, source_haplotypes)
individual.imputed_haplotypes = haplotype
@jit(nopython=True, nogil=True)
def getHaploidDosages(hap_est, source_haplotypes):
"""Calculate dosages for a single haplotype"""
n_loci, n_haps = hap_est.shape
dosages = np.zeros(n_loci, dtype=np.float32)
for i in range(n_loci):
for j in range(n_haps):
dosages[i] += source_haplotypes[j, i] * hap_est[i, j]
return dosages
@jit(nopython=True, nogil=True)
def getHaploidSample(point_estimates, recombination_rate, source_haps):
"""Sample a haplotype"""
forward_probs = haploidForward(point_estimates, recombination_rate)
haplotype = haploidSampleHaplotype(forward_probs, source_haps, recombination_rate)
return haplotype
@jit(nopython=True, nogil=True)
def get_viterbi(point_estimates, recombination_rate, haplotype_library):
"""Get most likely haplotype using the Viterbi algorithm"""
forward_probs = haploidForward(point_estimates, recombination_rate)
indices = haploid_viterbi(forward_probs, recombination_rate)
return haplotype_from_indices(indices, haplotype_library)
@jit(nopython=True)
def haploidCallHaps(hapEst, threshold):
nHaps, nLoci = hapEst.shape
calledHaps = np.full(nLoci, -1, dtype=np.int64) # These are haplotype ids. -1 is missing.
for i in range(nLoci):
maxVal = -1
maxLoc = -1
for j in range(nHaps):
if hapEst[j, i] > threshold and hapEst[j, i] > maxVal:
maxLoc = j
maxVal = hapEst[j, i]
calledHaps[i] = maxLoc
return calledHaps
@jit(nopython=True)
def getHaploidGenotypes(calledHaps, sourceHaplotypes):
nHaps, nLoci = sourceHaplotypes.shape
calledGenotypes = np.full(nLoci, 9, dtype=np.int8)  # Genotype values; 9 is missing.
for i in range(nLoci):
if calledHaps[i] != -1:
calledGenotypes[i] = sourceHaplotypes[calledHaps[i], i]
return calledGenotypes
@jit(nopython=True, nogil=True)
def getHaploidPointEstimates(targetHaplotype, sourceHaplotypes, error):
nHaps, nLoci = sourceHaplotypes.shape
pointMat = np.full((nLoci, nHaps), 1, dtype=np.float32)
for i in range(nLoci):
if targetHaplotype[i] != 9:
for j in range(nHaps):
if targetHaplotype[i] == sourceHaplotypes[j, i]:
pointMat[i, j] = 1 - error[i]
else:
pointMat[i, j] = error[i]
return pointMat
@jit(nopython=True, nogil=True)
def haploidTransformProbs(previous, new, estimate, point_estimate, recombination_rate):
"""Transforms a probability distribution (over haplotypes, at a single locus)
to a probability distribution at the next locus by accounting for emission probabilities
(point_estimates) and transition probabilities (recombination_rate)
This is a core step in the forward and backward algorithms
point_estimates emission probabilities - (1D NumPy array)
recombination_rate recombination rate at this locus - (scalar)
previous probability distribution over haplotypes (hidden states) at the *previous* locus
estimate newly calculated probability distribution over haplotypes at *this* locus
new intermediate probability distribution (passed in to this function for speed)
Note: previous and estimate are updated by this function
"""
n_haps = len(previous)
# Get estimate at this locus and normalize
new[:] = previous * point_estimate
new /= np.sum(new)
# Account for recombination rate
e = recombination_rate
e1 = 1-recombination_rate
for j in range(n_haps):
new[j] = new[j]*e1 + e/n_haps
# Update distributions (in place)
for j in range(n_haps):
estimate[j] *= new[j]
previous[j] = new[j]
@jit(nopython=True, nogil=True)
def haploidOneSample(forward_probs, recombination_rate):
"""Sample one haplotype (an individual) from the forward and backward probability distributions
Returns two arrays:
sample_indices array of indices of haplotypes in the haplotype library at each locus
e.g. an individual composed of haplotypes 13 and 42 with 8 loci:
[42, 42, 42, 42, 42, 13, 13, 13]
A description of the sampling process would be nice here..."""
est = forward_probs.copy() # copy so that forward_probs is not modified
n_loci, n_haps = forward_probs.shape
prev = np.ones(n_haps, dtype=np.float32)
new = np.empty(n_haps, dtype=np.float32)
# Sampled probability distribution at one locus
sampled_probs = np.empty(n_haps, dtype=np.float32)
sample_indices = np.empty(n_loci, dtype=np.int64)
# Backwards algorithm
for i in range(n_loci-2, -1, -1): # zero indexed then minus one since we skip the boundary
# Sample at this locus
j = NumbaUtils.multinomial_sample(pvals=est[i+1, :])
sampled_probs[:] = 0
sampled_probs[j] = 1
sample_indices[i+1] = j
# Get estimate at this locus using the *sampled* distribution
# (instead of the point estimates/emission probabilities)
haploidTransformProbs(prev, new, est[i, :], sampled_probs, recombination_rate[i+1])
# No need to normalise at this locus as multinomial_sample()
# handles un-normalized probabilities
# Last sample (at the first locus)
j = NumbaUtils.multinomial_sample(pvals=est[0, :])
sample_indices[0] = j
return sample_indices
@jit(nopython=True, nogil=True)
def haploid_viterbi(forward_probs, recombination_rate):
"""Find the most likely haplotype according to the The Viterbi algorithm
Returns:
indices array of indices of haplotypes in the haplotype library at each locus
e.g. an individual composed of haplotypes 13 and 42 with 8 loci:
[42, 42, 42, 42, 42, 13, 13, 13]"""
est = forward_probs.copy() # copy so that forward_probs is not modified
n_loci, n_haps = forward_probs.shape
prev = np.ones(n_haps, dtype=np.float32)
new = np.empty(n_haps, dtype=np.float32)
# Most likely probability distribution at one locus
sampled_probs = np.empty(n_haps, dtype=np.float32)
indices = np.empty(n_loci, dtype=np.int64)
# Backwards algorithm
for i in range(n_loci-2, -1, -1): # zero indexed then minus one since we skip the boundary
# Choose the most likely state (i.e. max probability) at this locus
j = np.argmax(est[i+1, :])
sampled_probs[:] = 0
sampled_probs[j] = 1
indices[i+1] = j
# Get estimate at this locus using the most likely distribution
# (instead of the point estimates/emission probabilities)
haploidTransformProbs(prev, new, est[i, :], sampled_probs, recombination_rate[i+1])
# No need to normalise at this locus as argmax() does not depend on normalisation
# Most likely state at the first locus
j = np.argmax(est[0, :])
indices[0] = j
return indices
@jit(nopython=True, nogil=True)
def haploidSampleHaplotype(forward_probs, haplotype_library, recombination_rate):
"""Sample one haplotype (an individual) from the forward and backward probability distributions
Returns: a sampled haplotype of length n_loci"""
indices = haploidOneSample(forward_probs, recombination_rate)
return haplotype_from_indices(indices, haplotype_library)
@jit(nopython=True, nogil=True)
def haploidForward(point_estimate, recombination_rate):
"""Calculate (unnomalized) forward probabilities"""
n_loci, n_haps = point_estimate.shape
est = point_estimate.copy()
prev = np.ones(n_haps, dtype=np.float32)
new = np.empty(n_haps, dtype=np.float32)
for i in range(1, n_loci):
# Update estimates at this locus
haploidTransformProbs(prev, new, est[i, :], point_estimate[i-1, :], recombination_rate[i])
return est
@jit(nopython=True)
def haploidBackward(point_estimate, recombination_rate):
"""Calculate (unnomalized) backward probabilities"""
n_loci, n_haps = point_estimate.shape
est = np.ones_like(point_estimate, dtype=np.float32)
prev = np.ones(n_haps, dtype=np.float32)
new = np.empty(n_haps, dtype=np.float32)
for i in range(n_loci-2, -1, -1): # zero indexed then minus one since we skip the boundary
# Update estimates at this locus
haploidTransformProbs(prev, new, est[i, :], point_estimate[i+1, :], recombination_rate[i+1])
return est
@jit(nopython=True)
def haploidForwardBackward(point_estimate, recombination_rate):
"""Calculate normalized state probabilities at each loci using the forward-backward algorithm"""
est = (haploidForward(point_estimate, recombination_rate) *
haploidBackward(point_estimate, recombination_rate))
# Return normalized probabilities
n_loci = point_estimate.shape[0]
for i in range(n_loci):
est[i, :] /= np.sum(est[i, :])
return est
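# A commented end-to-end sketch (made-up toy data) running the pieces above directly
# on plain NumPy arrays, without an individual object:
# target = np.array([0, 9, 1, 1], dtype=np.int8)           # 9 = missing
# source = np.array([[0, 0, 1, 1],
#                    [1, 1, 0, 0]], dtype=np.int8)
# err = np.full(4, 0.01, dtype=np.float32)
# rec = np.full(4, 0.05, dtype=np.float32)
# point = getHaploidPointEstimates(target, source, err)    # shape (n_loci, n_haps)
# probs = haploidForwardBackward(point, rec)               # rows sum to 1
# dosages = getHaploidDosages(probs, source)               # expected allele per locus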
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/HaploidHMM.py
|
HaploidHMM.py
|
import concurrent.futures
import itertools
import numpy as np
import math
from . import InputOutput
def convert_data_to_line(data_tuple, fmt) :
idx, data = data_tuple
return idx + ' ' + ' '.join(map(fmt, data)) + '\n'
def convert_data_to_line_plink(data_tuple, fmt):
"""Format data for PLINK plain text output"""
idx, data = data_tuple
return f"0 {idx} 0 0 0 0 {' '.join(data.astype(fmt))}\n"
def writeLines(fileName, data_list, fmt, converter=convert_data_to_line):
# print(f"Writing results to: {fileName}")
try:
iothreads = InputOutput.args.iothreads
except AttributeError as error:
iothreads = 1
with open(fileName, 'w+') as f:
if iothreads > 1:
with concurrent.futures.ProcessPoolExecutor(max_workers = iothreads) as executor:
# Break up into small-ish chunks to reduce overall memory cost.
# Hard code: splits into 1k individuals.
# These get then split up into one chunk per thread.
subsets = split_by(data_list, 1000)
for subset in subsets:
for result in executor.map(converter, subset, itertools.repeat(fmt), chunksize=math.ceil(1000/iothreads)):
f.write(result)
if iothreads <= 1:
for data_tuple in data_list:
result = converter(data_tuple, fmt)
f.write(result)
def writeLinesPlinkPlainTxt(fileName, data_list):
"""Write lines in PLINK plain text format"""
writeLines(fileName, data_list, fmt=str, converter=convert_data_to_line_plink)
def split_by(array, step):
output = []
i = 0
while i*step < len(array):
start = i*step
stop = (i+1)*step
output.append(array[start:stop])
i += 1
return output
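# Quick illustration (hypothetical input):
# split_by(list(range(5)), 2)   # -> [[0, 1], [2, 3], [4]]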
def process_input_line(line, startsnp, stopsnp, dtype):
parts = line.split()
idx = parts[0]
parts = parts[1:]
if startsnp is not None :
parts = parts[startsnp : stopsnp + 1] # stopsnp + 1 so the stop SNP is included (the id has already been stripped off)
data=np.array([int(val) for val in parts], dtype = dtype)
return (idx, data)
def process_input_line_plink(line, startsnp, stopsnp, dtype):
"""Proces a line from PLINK .ped file
Fields:
0 Family ID ('FID')
1 Within-family ID ('IID'; cannot be '0')
2 Within-family ID of father ('0' if father isn't in dataset)
3 Within-family ID of mother ('0' if mother isn't in dataset)
4 Sex code ('1' = male, '2' = female, '0' = unknown)
5 Phenotype value ('1' = control, '2' = case, '-9'/'0'/non-numeric = missing data if case/control)
6-end Genotypes as pairs of alleles (A, C, G or T)
At present, this extracts individual's identifier as the within-family ID
"""
parts = line.split()
idx = parts[1] # Use within-family ID
genotypes = parts[6:]
if startsnp is not None:
genotypes = genotypes[startsnp*2: stopsnp*2 + 2] # Each locus is represented by two alleles
data = np.array(genotypes, dtype=np.bytes_)
return (idx, data)
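# Hypothetical single-record example (two loci, values made up):
# line = "FAM1 IID1 0 0 1 -9 A A G T"
# idx, data = process_input_line_plink(line, startsnp=None, stopsnp=None, dtype=None)
# # idx == "IID1"; data == np.array([b'A', b'A', b'G', b'T'], dtype='S1')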
def readLines(fileName, startsnp, stopsnp, dtype, processor=process_input_line):
# print(f"Reading in file: {fileName}")
try:
iothreads = InputOutput.args.iothreads
except AttributeError as error:
iothreads = 1
output = []
with open(fileName) as f:
if iothreads > 1:
# This could be more efficient, but it's dwarfed by some of the other stuff in the program.
# i.e. a line is roughly the same size as the haplotypes (2 bytes per genotype value, i.e. (space)(value), and two values per haplotype).
all_outputs = []
lines = list(itertools.islice(f, 1000))
while len(lines) > 0:
with concurrent.futures.ProcessPoolExecutor(max_workers = iothreads) as executor:
chunk_output = executor.map(processor, lines, itertools.repeat(startsnp), itertools.repeat(stopsnp), itertools.repeat(dtype), chunksize=math.ceil(1000/iothreads))
all_outputs.append(chunk_output)
lines = list(itertools.islice(f, 1000))
output = itertools.chain.from_iterable(all_outputs)
if iothreads <= 1:
for line in f:
output.append(processor(line, startsnp = startsnp, stopsnp = stopsnp, dtype = dtype))
return output
def readLinesPlinkPlainTxt(fileName, startsnp, stopsnp, dtype):
"""Read lines in PLINK plain text format"""
return readLines(fileName, startsnp, stopsnp, dtype, processor=process_input_line_plink)
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/MultiThreadIO.py
|
MultiThreadIO.py
|
import numba
from numba import jit, float32, int8, int64, optional, boolean
from numba.experimental import jitclass
import numpy as np
def setup_individual(ind):
fillInPhaseFromGenotypes(ind.haplotypes[0], ind.genotypes)
fillInPhaseFromGenotypes(ind.haplotypes[1], ind.genotypes)
fillInGenotypesFromPhase(ind.genotypes, ind.haplotypes[0], ind.haplotypes[1])
def align_individual(ind):
# Note: The note below is no longer true.
# Note: We never directly set genotypes so no need to go from genotypes -> phase
# Fill in phase from genotypes.
fillInPhaseFromGenotypes(ind.haplotypes[0], ind.genotypes)
fillInPhaseFromGenotypes(ind.haplotypes[1], ind.genotypes)
# Fill in genotypes from phase, and fill in haplotypes from each other.
fillInGenotypesFromPhase(ind.genotypes, ind.haplotypes[0], ind.haplotypes[1])
fillInCompPhase(ind.haplotypes[0], ind.genotypes, ind.haplotypes[1])
fillInCompPhase(ind.haplotypes[1], ind.genotypes, ind.haplotypes[0])
def ind_fillInGenotypesFromPhase(ind):
#Note: We never directly set genotypes so no need to go from genotypes -> phase
fillInGenotypesFromPhase(ind.genotypes, ind.haplotypes[0], ind.haplotypes[1])
def fillFromParents(ind):
if ind.sire is not None:
if ind.sire.genotypes is not None:
sirePhase = getPhaseFromGenotypes(ind.sire.genotypes)
fillIfMissing(ind.haplotypes[0], sirePhase)
if ind.dam is not None:
if ind.dam.genotypes is not None:
damPhase = getPhaseFromGenotypes(ind.dam.genotypes)
fillIfMissing(ind.haplotypes[0], damPhase)
@jit(nopython=True)
def fillIfMissing(orig, new):
for i in range(len(orig)):
if orig[i] == 9:
orig[i] = new[i]
@jit(nopython=True)
def fillInGenotypesFromPhase(geno, phase1, phase2):
for i in range(len(geno)):
if geno[i] == 9:
if phase1[i] != 9 and phase2[i] != 9:
geno[i] = phase1[i] + phase2[i]
@jit(nopython=True)
def fillInCompPhase(target, geno, compPhase):
for i in range(len(geno)):
if target[i] == 9:
if geno[i] != 9:
if compPhase[i] != 9:
target[i] = geno[i] - compPhase[i]
@jit(nopython=True)
def fillInPhaseFromGenotypes(phase, geno):
for i in range(len(geno)):
if phase[i] == 9 :
if geno[i] == 0: phase[i] = 0
if geno[i] == 2: phase[i] = 1
@jit(nopython=True)
def getPhaseFromGenotypes(geno):
phase = np.full(len(geno), 9, dtype = np.int8)
for i in range(len(geno)):
if phase[i] == 9 :
if geno[i] == 0: phase[i] = 0
if geno[i] == 2: phase[i] = 1
return phase
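# Small worked illustration (made-up genotypes) of the 0/1/2 genotype and 0/1/9
# phase encoding used above:
# geno = np.array([0, 1, 2, 9], dtype=np.int8)
# getPhaseFromGenotypes(geno)   # -> array([0, 9, 1, 9], dtype=int8)
# # Only homozygous genotypes (0 or 2) determine phase; heterozygous and missing stay 9.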
# def ind_randomlyPhaseRandomPoint(ind):
# maxLength = len(ind.genotypes)
# midpoint = np.random.normal(maxLength/2, maxLength/10)
# while midpoint < 0 or midpoint > maxLength:
# midpoint = np.random.normal(maxLength/2, maxLength/10)
# midpoint = int(midpoint)
# randomlyPhaseMidpoint(ind.genotypes, ind.haplotypes, midpoint)
# def ind_randomlyPhaseMidpoint(ind, midpoint= None):
# if midpoint is None: midpoint = int(len(ind.genotypes)/2)
# randomlyPhaseMidpoint(ind.genotypes, ind.haplotypes, midpoint)
# @jit(nopython=True)
# def randomlyPhaseMidpoint(geno, phase, midpoint):
# index = 0
# e = 1
# changed = False
# while not changed :
# if geno[midpoint + index * e] == 1:
# phase[0][midpoint + index * e] = 0
# phase[1][midpoint + index * e] = 1
# changed = True
# e = -e
# if e == -1: index += 1
# if index >= midpoint: changed = True
|
AlphaPeel
|
/AlphaPeel-1.1.3.tar.gz/AlphaPeel-1.1.3/src/tinypeel/tinyhouse/HaplotypeOperations.py
|
HaplotypeOperations.py
|
# alphabetPy

[](LICENSE.txt)
[](https://pypi.python.org/pypi/AlphabetPy/1.0)
[]()
A Python tool that outputs characters in the console/shell, imitating starkwang's alphabetJS
> When I used [@starkwang](https://github.com/starkwang)'s project [alphabetJS](https://github.com/starkwang), I found it very convenient, so I rewrote it in Python. Because I'm not very familiar with the LGPL, if there is any infringement, please contact me immediately and I will fix it.
## Install
### 1. Use `pip` to install it
`pip install AlphabetPy`
### 2. Download this project and use `setup.py`
```shell
$ cd alphabetPy
$ python setup.py install
```
## Usage
After installing `alphabetPy`, you can run it from the shell:
```shell
$ AlphabetPy
input the string you want to change:<what you want to output>
input the mode you want to use:("planar" or "stereo"):<choose "planar" or "stereo">
```
for example:

you can also use the command:
`alphabetPy -s <what you want to output> -m <choose "planar" or "stereo">`
for example:

if you don't pass `-m`, mode "planar" is used to output the characters.
if you want to use `alphabetPy` in your own project, you can import `AlphabetPy`:
for example:
```python
import AlphabetPy
string = "AlphabetPy"
letter = AlphabetPy.getAlphabet(string, "stereo")
for i in range(7):
print(letter[i])
```
Then you'll see the output in the shell.
However, the code is a bit clumsy in that the letters cannot be printed directly; maybe I'll improve it someday.
## License
### The MIT License (MIT)
Copyright (c) 2018 Ray Zhao <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
AlphabetPy
|
/AlphabetPy-1.0.1.tar.gz/AlphabetPy-1.0.1/README.md
|
README.md
|