from operator import gt, lt
from .libmp.backend import xrange
from .functions.functions import SpecialFunctions
from .functions.rszeta import RSCache
from .calculus.quadrature import QuadratureMethods
from .calculus.calculus import CalculusMethods
from .calculus.optimization import OptimizationMethods
from .calculus.odes import ODEMethods
from .matrices.matrices import MatrixMethods
from .matrices.calculus import MatrixCalculusMethods
from .matrices.linalg import LinearAlgebraMethods
from .identification import IdentificationMethods
from .visualization import VisualizationMethods
from . import libmp
class Context(object):
pass
class StandardBaseContext(Context,
SpecialFunctions,
RSCache,
QuadratureMethods,
CalculusMethods,
MatrixMethods,
MatrixCalculusMethods,
LinearAlgebraMethods,
IdentificationMethods,
OptimizationMethods,
ODEMethods,
VisualizationMethods):
NoConvergence = libmp.NoConvergence
ComplexResult = libmp.ComplexResult
def __init__(ctx):
ctx._aliases = {}
# Call those that need preinitialization (e.g. for wrappers)
SpecialFunctions.__init__(ctx)
RSCache.__init__(ctx)
QuadratureMethods.__init__(ctx)
CalculusMethods.__init__(ctx)
MatrixMethods.__init__(ctx)
def _init_aliases(ctx):
for alias, value in ctx._aliases.items():
try:
setattr(ctx, alias, getattr(ctx, value))
except AttributeError:
pass
_fixed_precision = False
# XXX
verbose = False
def warn(ctx, msg):
print("Warning:", msg)
def bad_domain(ctx, msg):
raise ValueError(msg)
def _re(ctx, x):
if hasattr(x, "real"):
return x.real
return x
def _im(ctx, x):
if hasattr(x, "imag"):
return x.imag
return ctx.zero
def _as_points(ctx, x):
return x
def fneg(ctx, x, **kwargs):
return -ctx.convert(x)
def fadd(ctx, x, y, **kwargs):
return ctx.convert(x)+ctx.convert(y)
def fsub(ctx, x, y, **kwargs):
return ctx.convert(x)-ctx.convert(y)
def fmul(ctx, x, y, **kwargs):
return ctx.convert(x)*ctx.convert(y)
def fdiv(ctx, x, y, **kwargs):
return ctx.convert(x)/ctx.convert(y)
def fsum(ctx, args, absolute=False, squared=False):
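        # e.g. fsum([1, -2, 3]) -> 2; with absolute=True the summands are
        # |x| (-> 6); with squared=True they are x**2 (-> 14); combined,
        # |x|**2 (illustrative values, assuming a concrete context like mp)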
if absolute:
if squared:
return sum((abs(x)**2 for x in args), ctx.zero)
return sum((abs(x) for x in args), ctx.zero)
if squared:
return sum((x**2 for x in args), ctx.zero)
return sum(args, ctx.zero)
def fdot(ctx, xs, ys=None, conjugate=False):
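        # e.g. fdot([1, 2], [3, 4]) -> 11 (= 1*3 + 2*4); with conjugate=True
        # the second sequence is conjugated, i.e. sum(x*conj(y)), the
        # Hermitian inner product (illustrative)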
if ys is not None:
xs = zip(xs, ys)
if conjugate:
cf = ctx.conj
return sum((x*cf(y) for (x,y) in xs), ctx.zero)
else:
return sum((x*y for (x,y) in xs), ctx.zero)
def fprod(ctx, args):
prod = ctx.one
for arg in args:
prod *= arg
return prod
def nprint(ctx, x, n=6, **kwargs):
"""
Equivalent to ``print(nstr(x, n))``.
"""
print(ctx.nstr(x, n, **kwargs))
def chop(ctx, x, tol=None):
"""
Chops off small real or imaginary parts, or converts
numbers close to zero to exact zeros. The input can be a
single number or an iterable::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> chop(5+1e-10j, tol=1e-9)
mpf('5.0')
>>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2]))
[1.0, 0.0, 3.0, -4.0, 2.0]
The tolerance defaults to ``100*eps``.
"""
if tol is None:
tol = 100*ctx.eps
try:
x = ctx.convert(x)
            absx = abs(x)
            if absx < tol:
return ctx.zero
if ctx._is_complex_type(x):
#part_tol = min(tol, absx*tol)
part_tol = max(tol, absx*tol)
if abs(x.imag) < part_tol:
return x.real
if abs(x.real) < part_tol:
return ctx.mpc(0, x.imag)
except TypeError:
if isinstance(x, ctx.matrix):
return x.apply(lambda a: ctx.chop(a, tol))
if hasattr(x, "__iter__"):
return [ctx.chop(a, tol) for a in x]
return x
def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
r"""
Determine whether the difference between `s` and `t` is smaller
than a given epsilon, either relatively or absolutely.
Both a maximum relative difference and a maximum difference
('epsilons') may be specified. The absolute difference is
defined as `|s-t|` and the relative difference is defined
as `|s-t|/\max(|s|, |t|)`.
If only one epsilon is given, both are set to the same value.
If none is given, both epsilons are set to `2^{-p+m}` where
`p` is the current working precision and `m` is a small
integer. The default setting typically allows :func:`~mpmath.almosteq`
to be used to check for mathematical equality
in the presence of small rounding errors.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15
>>> almosteq(3.141592653589793, 3.141592653589790)
True
>>> almosteq(3.141592653589793, 3.141592653589700)
False
>>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
True
>>> almosteq(1e-20, 2e-20)
True
>>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
False
"""
t = ctx.convert(t)
if abs_eps is None and rel_eps is None:
rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4)
if abs_eps is None:
abs_eps = rel_eps
elif rel_eps is None:
rel_eps = abs_eps
diff = abs(s-t)
if diff <= abs_eps:
return True
abss = abs(s)
abst = abs(t)
if abss < abst:
err = diff/abst
else:
err = diff/abss
return err <= rel_eps
def arange(ctx, *args):
r"""
        This is a generalized version of Python's built-in :func:`range`
        function that accepts fractional endpoints and step sizes and
        returns a list of ``mpf`` instances. Like :func:`range`,
:func:`~mpmath.arange` can be called with 1, 2 or 3 arguments:
``arange(b)``
`[0, 1, 2, \ldots, x]`
``arange(a, b)``
`[a, a+1, a+2, \ldots, x]`
``arange(a, b, h)``
            `[a, a+h, a+2h, \ldots, x]`
where `b-1 \le x < b` (in the third case, `b-h \le x < b`).
        Like Python's :func:`range`, the endpoint is not included. To
produce ranges where the endpoint is included, :func:`~mpmath.linspace`
is more convenient.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> arange(4)
[mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')]
>>> arange(1, 2, 0.25)
[mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')]
>>> arange(1, -1, -0.75)
[mpf('1.0'), mpf('0.25'), mpf('-0.5')]
"""
if not len(args) <= 3:
raise TypeError('arange expected at most 3 arguments, got %i'
% len(args))
if not len(args) >= 1:
raise TypeError('arange expected at least 1 argument, got %i'
% len(args))
# set default
a = 0
dt = 1
# interpret arguments
if len(args) == 1:
b = args[0]
elif len(args) >= 2:
a = args[0]
b = args[1]
if len(args) == 3:
dt = args[2]
a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt)
assert a + dt != a, 'dt is too small and would cause an infinite loop'
# adapt code for sign of dt
if a > b:
if dt > 0:
return []
op = gt
else:
if dt < 0:
return []
op = lt
# create list
result = []
i = 0
t = a
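        # t is recomputed as a + dt*i on each pass (rather than t += dt)
        # so that rounding error in the step size does not accumulate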
while 1:
t = a + dt*i
i += 1
if op(t, b):
result.append(t)
else:
break
return result
def linspace(ctx, *args, **kwargs):
"""
``linspace(a, b, n)`` returns a list of `n` evenly spaced
samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)``
is also valid.
This function is often more convenient than :func:`~mpmath.arange`
for partitioning an interval into subintervals, since
the endpoint is included::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> linspace(1, 4, 4)
[mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')]
You may also provide the keyword argument ``endpoint=False``::
>>> linspace(1, 4, 4, endpoint=False)
[mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')]
"""
if len(args) == 3:
a = ctx.mpf(args[0])
b = ctx.mpf(args[1])
n = int(args[2])
elif len(args) == 2:
assert hasattr(args[0], '_mpi_')
a = args[0].a
b = args[0].b
n = int(args[1])
else:
raise TypeError('linspace expected 2 or 3 arguments, got %i' \
% len(args))
if n < 1:
raise ValueError('n must be greater than 0')
        if kwargs.get('endpoint', True):
if n == 1:
return [ctx.mpf(a)]
step = (b - a) / ctx.mpf(n - 1)
y = [i*step + a for i in xrange(n)]
y[-1] = b
else:
step = (b - a) / ctx.mpf(n)
y = [i*step + a for i in xrange(n)]
return y
def cos_sin(ctx, z, **kwargs):
return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs)
def cospi_sinpi(ctx, z, **kwargs):
return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs)
def _default_hyper_maxprec(ctx, p):
return int(1000 * p**0.25 + 4*p)
_gcd = staticmethod(libmp.gcd)
list_primes = staticmethod(libmp.list_primes)
isprime = staticmethod(libmp.isprime)
bernfrac = staticmethod(libmp.bernfrac)
moebius = staticmethod(libmp.moebius)
_ifac = staticmethod(libmp.ifac)
_eulernum = staticmethod(libmp.eulernum)
def sum_accurately(ctx, terms, check_step=1):
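        # Adaptive-precision summation: the terms are re-summed with more
        # working precision until the detected cancellation is covered.
        # ``terms`` must be a zero-argument callable returning a *fresh*
        # generator each time, since the sum may be restarted, e.g.
        # (illustrative sketch):
        #
        #     def terms():
        #         yield ctx.mpf(1)
        #         yield ctx.mpf('1e-40')
        #         yield ctx.mpf(-1)
        #     ctx.sum_accurately(terms)   # recovers 1.0e-40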
prec = ctx.prec
try:
extraprec = 10
while 1:
ctx.prec = prec + extraprec + 5
max_mag = ctx.ninf
s = ctx.zero
k = 0
for term in terms():
s += term
if (not k % check_step) and term:
term_mag = ctx.mag(term)
max_mag = max(max_mag, term_mag)
sum_mag = ctx.mag(s)
if sum_mag - term_mag > ctx.prec:
break
k += 1
cancellation = max_mag - sum_mag
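                # a nan cancellation (e.g. from infinite terms) fails the
                # self-comparison below, so the loop stops retrying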
if cancellation != cancellation:
break
if cancellation < extraprec or ctx._fixed_precision:
break
extraprec += min(ctx.prec, cancellation)
return s
finally:
ctx.prec = prec
def mul_accurately(ctx, factors, check_step=1):
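        # Product analogue of sum_accurately: factors should have the form
        # 1 + eps, and cancellation is monitored via the magnitude of s - 1.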
prec = ctx.prec
try:
extraprec = 10
while 1:
ctx.prec = prec + extraprec + 5
max_mag = ctx.ninf
one = ctx.one
s = one
k = 0
for factor in factors():
s *= factor
term = factor - one
if (not k % check_step):
term_mag = ctx.mag(term)
max_mag = max(max_mag, term_mag)
sum_mag = ctx.mag(s-one)
#if sum_mag - term_mag > ctx.prec:
# break
if -term_mag > ctx.prec:
break
k += 1
cancellation = max_mag - sum_mag
if cancellation != cancellation:
break
if cancellation < extraprec or ctx._fixed_precision:
break
extraprec += min(ctx.prec, cancellation)
return s
finally:
ctx.prec = prec
def power(ctx, x, y):
r"""Converts `x` and `y` to mpmath numbers and evaluates
`x^y = \exp(y \log(x))`::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> power(2, 0.5)
1.41421356237309504880168872421
This shows the leading few digits of a large Mersenne prime
(performing the exact calculation ``2**43112609-1`` and
displaying the result in Python would be very slow)::
>>> power(2, 43112609)-1
3.16470269330255923143453723949e+12978188
"""
return ctx.convert(x) ** ctx.convert(y)
def _zeta_int(ctx, n):
return ctx.zeta(n)
def maxcalls(ctx, f, N):
"""
Return a wrapped copy of *f* that raises ``NoConvergence`` when *f*
has been called more than *N* times::
>>> from mpmath import *
>>> mp.dps = 15
>>> f = maxcalls(sin, 10)
>>> print(sum(f(n) for n in range(10)))
1.95520948210738
>>> f(10)
Traceback (most recent call last):
...
NoConvergence: maxcalls: function evaluated 10 times
"""
counter = [0]
def f_maxcalls_wrapped(*args, **kwargs):
counter[0] += 1
if counter[0] > N:
raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N)
return f(*args, **kwargs)
return f_maxcalls_wrapped
def memoize(ctx, f):
"""
Return a wrapped copy of *f* that caches computed values, i.e.
a memoized copy of *f*. Values are only reused if the cached precision
is equal to or higher than the working precision::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = memoize(maxcalls(sin, 1))
>>> f(2)
0.909297426825682
>>> f(2)
0.909297426825682
>>> mp.dps = 25
>>> f(2)
Traceback (most recent call last):
...
NoConvergence: maxcalls: function evaluated 1 times
"""
f_cache = {}
def f_cached(*args, **kwargs):
if kwargs:
key = args, tuple(kwargs.items())
else:
key = args
prec = ctx.prec
if key in f_cache:
cprec, cvalue = f_cache[key]
if cprec >= prec:
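                    # the unary plus re-rounds the cached (higher-precision)
                    # value down to the current working precision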
return +cvalue
value = f(*args, **kwargs)
f_cache[key] = (prec, value)
return value
f_cached.__name__ = f.__name__
f_cached.__doc__ = f.__doc__
return f_cached

# ===== end of file: ctx_base.py =====
# package: AltAnalyze
# path: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/ctx_base.py

__docformat__ = 'plaintext'
import re
from .ctx_base import StandardBaseContext
from .libmp.backend import basestring
from . import libmp
from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
ComplexResult, to_pickable, from_pickable, normalize,
from_int, from_float, from_str, to_int, to_float, to_str,
from_rational, from_man_exp,
fone, fzero, finf, fninf, fnan,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
mpf_hash, mpf_rand,
mpf_sum,
bitcount, to_fixed,
mpc_to_str,
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_mpf_div,
mpf_pow,
mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
mpf_glaisher, mpf_twinprime, mpf_mertens,
int_types)
from . import function_docs
from . import rational
new = object.__new__
get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?)??'
r'(?P<im>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?j)?\)?$')
try:
from sage.libs.mpmath.ext_main import Context as BaseMPContext
# pickle hack
import sage.libs.mpmath.ext_main as _mpf_module
except ImportError:
from .ctx_mp_python import PythonMPContext as BaseMPContext
    from . import ctx_mp_python as _mpf_module
from .ctx_mp_python import _mpf, _mpc, mpnumeric
class MPContext(BaseMPContext, StandardBaseContext):
"""
Context for multiprecision arithmetic with a global precision.
"""
def __init__(ctx):
BaseMPContext.__init__(ctx)
ctx.trap_complex = False
ctx.pretty = False
ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
ctx._mpq = rational.mpq
ctx.default()
StandardBaseContext.__init__(ctx)
ctx.mpq = rational.mpq
ctx.init_builtins()
ctx.hyp_summators = {}
ctx._init_aliases()
# XXX: automate
try:
ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
ctx.primepi.im_func.func_doc = function_docs.primepi
ctx.psi.im_func.func_doc = function_docs.psi
ctx.atan2.im_func.func_doc = function_docs.atan2
except AttributeError:
# python 3
ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
ctx.primepi.__func__.func_doc = function_docs.primepi
ctx.psi.__func__.func_doc = function_docs.psi
ctx.atan2.__func__.func_doc = function_docs.atan2
ctx.digamma.func_doc = function_docs.digamma
ctx.cospi.func_doc = function_docs.cospi
ctx.sinpi.func_doc = function_docs.sinpi
def init_builtins(ctx):
mpf = ctx.mpf
mpc = ctx.mpc
# Exact constants
ctx.one = ctx.make_mpf(fone)
ctx.zero = ctx.make_mpf(fzero)
ctx.j = ctx.make_mpc((fzero,fone))
ctx.inf = ctx.make_mpf(finf)
ctx.ninf = ctx.make_mpf(fninf)
ctx.nan = ctx.make_mpf(fnan)
eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
"epsilon of working precision", "eps")
ctx.eps = eps
# Approximate constants
ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
# Standard functions
ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
ctx.gamma_old = ctx._wrap_libmp_function(libmp.mpf_gamma_old, libmp.mpc_gamma_old)
ctx.fac_old = ctx.factorial_old = ctx._wrap_libmp_function(libmp.mpf_factorial_old, libmp.mpc_factorial_old)
ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
# Faster versions
ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt)
ctx.exp = getattr(ctx, "_sage_exp", ctx.exp)
ctx.ln = getattr(ctx, "_sage_ln", ctx.ln)
ctx.cos = getattr(ctx, "_sage_cos", ctx.cos)
ctx.sin = getattr(ctx, "_sage_sin", ctx.sin)
def to_fixed(ctx, x, prec):
return x.to_fixed(prec)
def hypot(ctx, x, y):
r"""
Computes the Euclidean norm of the vector `(x, y)`, equal
to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
x = ctx.convert(x)
y = ctx.convert(y)
return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding))
def _gamma_upper_int(ctx, n, z):
n = int(ctx._re(n))
if n == 0:
return ctx.e1(z)
if not hasattr(z, '_mpf_'):
raise NotImplementedError
prec, rounding = ctx._prec_rounding
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True)
if imag is None:
return ctx.make_mpf(real)
else:
return ctx.make_mpc((real, imag))
def _expint_int(ctx, n, z):
n = int(n)
if n == 1:
return ctx.e1(z)
if not hasattr(z, '_mpf_'):
raise NotImplementedError
prec, rounding = ctx._prec_rounding
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding)
if imag is None:
return ctx.make_mpf(real)
else:
return ctx.make_mpc((real, imag))
def _nthroot(ctx, x, n):
if hasattr(x, '_mpf_'):
try:
return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
except ComplexResult:
if ctx.trap_complex:
raise
x = (x._mpf_, libmp.fzero)
else:
x = x._mpc_
return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
def _besselj(ctx, n, z):
prec, rounding = ctx._prec_rounding
if hasattr(z, '_mpf_'):
return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
elif hasattr(z, '_mpc_'):
return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
def _agm(ctx, a, b=1):
prec, rounding = ctx._prec_rounding
if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
try:
v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
return ctx.make_mpf(v)
except ComplexResult:
pass
if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
else: a = a._mpc_
if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
else: b = b._mpc_
return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
def bernoulli(ctx, n):
return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding))
def _zeta_int(ctx, n):
return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding))
def atan2(ctx, y, x):
x = ctx.convert(x)
y = ctx.convert(y)
return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding))
def psi(ctx, m, z):
z = ctx.convert(z)
m = int(m)
if ctx._is_real_type(z):
return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding))
else:
return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding))
def cos_sin(ctx, x, **kwargs):
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._parse_prec(kwargs)
if hasattr(x, '_mpf_'):
c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
return ctx.make_mpf(c), ctx.make_mpf(s)
elif hasattr(x, '_mpc_'):
c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
return ctx.make_mpc(c), ctx.make_mpc(s)
else:
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def cospi_sinpi(ctx, x, **kwargs):
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._parse_prec(kwargs)
if hasattr(x, '_mpf_'):
c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
return ctx.make_mpf(c), ctx.make_mpf(s)
elif hasattr(x, '_mpc_'):
c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
return ctx.make_mpc(c), ctx.make_mpc(s)
else:
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def clone(ctx):
"""
Create a copy of the context, with the same working precision.
"""
a = ctx.__class__()
a.prec = ctx.prec
return a
# Several helper methods
# TODO: add more of these, make consistent, write docstrings, ...
def _is_real_type(ctx, x):
if hasattr(x, '_mpc_') or type(x) is complex:
return False
return True
def _is_complex_type(ctx, x):
if hasattr(x, '_mpc_') or type(x) is complex:
return True
return False
def isnpint(ctx, x):
"""
Determine if *x* is a nonpositive integer.
"""
if not x:
return True
if hasattr(x, '_mpf_'):
sign, man, exp, bc = x._mpf_
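            # negative (sign bit set) and an exact integer (exp >= 0);
            # x == 0 was already handled above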
return sign and exp >= 0
if hasattr(x, '_mpc_'):
return not x.imag and ctx.isnpint(x.real)
if type(x) in int_types:
return x <= 0
if isinstance(x, ctx.mpq):
p, q = x._mpq_
if not p:
return True
return q == 1 and p <= 0
return ctx.isnpint(ctx.convert(x))
def __str__(ctx):
lines = ["Mpmath settings:",
(" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
(" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
(" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
]
return "\n".join(lines)
@property
def _repr_digits(ctx):
return repr_dps(ctx._prec)
@property
def _str_digits(ctx):
return ctx._dps
def extraprec(ctx, n, normalize_output=False):
"""
        The block

            with extraprec(n):
                <code>

        increases the precision by n bits, executes <code>, and then
        restores the precision.
extraprec(n)(f) returns a decorated version of the function f
that increases the working precision by n bits before execution,
and restores the parent precision afterwards. With
normalize_output=True, it rounds the return value to the parent
precision.
"""
return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
def extradps(ctx, n, normalize_output=False):
"""
This function is analogous to extraprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
def workprec(ctx, n, normalize_output=False):
"""
        The block

            with workprec(n):
                <code>

        sets the precision to n bits, executes <code>, and then restores
        the precision.
workprec(n)(f) returns a decorated version of the function f
that sets the precision to n bits before execution,
and restores the precision afterwards. With normalize_output=True,
it rounds the return value to the parent precision.
"""
return PrecisionManager(ctx, lambda p: n, None, normalize_output)
def workdps(ctx, n, normalize_output=False):
"""
This function is analogous to workprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(ctx, None, lambda d: n, normalize_output)
def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
"""
Return a wrapped copy of *f* that repeatedly evaluates *f*
with increasing precision until the result converges to the
full precision used at the point of the call.
This heuristically protects against rounding errors, at the cost of
roughly a 2x slowdown compared to manually setting the optimal
precision. This method can, however, easily be fooled if the results
from *f* depend "discontinuously" on the precision, for instance
if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
should be used judiciously.
**Examples**
Many functions are sensitive to perturbations of the input arguments.
If the arguments are decimal numbers, they may have to be converted
to binary at a much higher precision. If the amount of required
extra precision is unknown, :func:`~mpmath.autoprec` is convenient::
>>> from mpmath import *
>>> mp.dps = 15
>>> mp.pretty = True
>>> besselj(5, 125 * 10**28) # Exact input
-8.03284785591801e-17
>>> besselj(5, '1.25e30') # Bad
7.12954868316652e-16
>>> autoprec(besselj)(5, '1.25e30') # Good
-8.03284785591801e-17
The following fails to converge because `\sin(\pi) = 0` whereas all
finite-precision approximations of `\pi` give nonzero values::
>>> autoprec(sin)(pi)
Traceback (most recent call last):
...
NoConvergence: autoprec: prec increased to 2910 without convergence
As the following example shows, :func:`~mpmath.autoprec` can protect against
cancellation, but is fooled by too severe cancellation::
>>> x = 1e-10
>>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
1.00000008274037e-10
1.00000000005e-10
1.00000000005e-10
>>> x = 1e-50
>>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
0.0
1.0e-50
0.0
With *catch*, an exception or list of exceptions to intercept
may be specified. The raised exception is interpreted
as signaling insufficient precision. This permits, for example,
evaluating a function where a too low precision results in a
division by zero::
>>> f = lambda x: 1/(exp(x)-1)
>>> f(1e-30)
Traceback (most recent call last):
...
ZeroDivisionError
>>> autoprec(f, catch=ZeroDivisionError)(1e-30)
1.0e+30
"""
def f_autoprec_wrapped(*args, **kwargs):
prec = ctx.prec
if maxprec is None:
maxprec2 = ctx._default_hyper_maxprec(prec)
else:
maxprec2 = maxprec
try:
ctx.prec = prec + 10
try:
v1 = f(*args, **kwargs)
except catch:
v1 = ctx.nan
prec2 = prec + 20
while 1:
ctx.prec = prec2
try:
v2 = f(*args, **kwargs)
except catch:
v2 = ctx.nan
if v1 == v2:
break
err = ctx.mag(v2-v1) - ctx.mag(v2)
if err < (-prec):
break
if verbose:
print("autoprec: target=%s, prec=%s, accuracy=%s" \
% (prec, prec2, -err))
v1 = v2
if prec2 >= maxprec2:
raise ctx.NoConvergence(\
"autoprec: prec increased to %i without convergence"\
% prec2)
prec2 += int(prec2*2)
prec2 = min(prec2, maxprec2)
finally:
ctx.prec = prec
return +v2
return f_autoprec_wrapped
def nstr(ctx, x, n=6, **kwargs):
"""
Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
significant digits. The small default value for *n* is chosen to
make this function useful for printing collections of numbers
(lists, matrices, etc).
If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
to each element. For unrecognized classes, :func:`~mpmath.nstr`
simply returns ``str(x)``.
The companion function :func:`~mpmath.nprint` prints the result
instead of returning it.
>>> from mpmath import *
>>> nstr([+pi, ldexp(1,-500)])
'[3.14159, 3.05494e-151]'
>>> nprint([+pi, ldexp(1,-500)])
[3.14159, 3.05494e-151]
"""
if isinstance(x, list):
return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
if isinstance(x, tuple):
return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
if hasattr(x, '_mpf_'):
return to_str(x._mpf_, n, **kwargs)
if hasattr(x, '_mpc_'):
return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
if isinstance(x, basestring):
return repr(x)
if isinstance(x, ctx.matrix):
return x.__nstr__(n, **kwargs)
return str(x)
def _convert_fallback(ctx, x, strings):
if strings and isinstance(x, basestring):
if 'j' in x.lower():
x = x.lower().replace(' ', '')
match = get_complex.match(x)
re = match.group('re')
if not re:
re = 0
im = match.group('im').rstrip('j')
return ctx.mpc(ctx.convert(re), ctx.convert(im))
if hasattr(x, "_mpi_"):
a, b = x._mpi_
if a == b:
return ctx.make_mpf(a)
else:
raise ValueError("can only create mpf from zero-width interval")
raise TypeError("cannot create mpf from " + repr(x))
def mpmathify(ctx, *args, **kwargs):
return ctx.convert(*args, **kwargs)
def _parse_prec(ctx, kwargs):
if kwargs:
if kwargs.get('exact'):
return 0, 'f'
prec, rounding = ctx._prec_rounding
if 'rounding' in kwargs:
rounding = kwargs['rounding']
if 'prec' in kwargs:
prec = kwargs['prec']
if prec == ctx.inf:
return 0, 'f'
else:
prec = int(prec)
elif 'dps' in kwargs:
dps = kwargs['dps']
if dps == ctx.inf:
return 0, 'f'
prec = dps_to_prec(dps)
return prec, rounding
return ctx._prec_rounding
_exact_overflow_msg = "the exact result does not fit in memory"
_hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. Try with a higher maxprec,
maxterms, or set zeroprec."""
def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
if hasattr(z, "_mpf_"):
key = p, q, flags, 'R'
v = z._mpf_
elif hasattr(z, "_mpc_"):
key = p, q, flags, 'C'
v = z._mpc_
if key not in ctx.hyp_summators:
ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
summator = ctx.hyp_summators[key]
prec = ctx.prec
maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
extraprec = 50
epsshift = 25
# Jumps in magnitude occur when parameters are close to negative
# integers. We must ensure that these terms are included in
# the sum and added accurately
magnitude_check = {}
max_total_jump = 0
for i, c in enumerate(coeffs):
if flags[i] == 'Z':
if i >= p and c <= 0:
ok = False
for ii, cc in enumerate(coeffs[:p]):
# Note: c <= cc or c < cc, depending on convention
if flags[ii] == 'Z' and cc <= 0 and c <= cc:
ok = True
if not ok:
raise ZeroDivisionError("pole in hypergeometric series")
continue
n, d = ctx.nint_distance(c)
n = -int(n)
d = -d
if i >= p and n >= 0 and d > 4:
if n in magnitude_check:
magnitude_check[n] += d
else:
magnitude_check[n] = d
extraprec = max(extraprec, d - prec + 60)
max_total_jump += abs(d)
while 1:
if extraprec > maxprec:
raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
wp = prec + extraprec
if magnitude_check:
mag_dict = dict((n,None) for n in magnitude_check)
else:
mag_dict = {}
zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
epsshift, mag_dict, **kwargs)
cancel = -magnitude
jumps_resolved = True
if extraprec < max_total_jump:
for n in mag_dict.values():
if (n is None) or (n < prec):
jumps_resolved = False
break
accurate = (cancel < extraprec-25-5 or not accurate_small)
if jumps_resolved:
if accurate:
break
# zero?
zeroprec = kwargs.get('zeroprec')
if zeroprec is not None:
if cancel > zeroprec:
if have_complex:
return ctx.mpc(0)
else:
return ctx.zero
# Some near-singularities were not included, so increase
# precision and repeat until they are
extraprec *= 2
# Possible workaround for bad roundoff in fixed-point arithmetic
epsshift += 5
extraprec += 5
if type(zv) is tuple:
if have_complex:
return ctx.make_mpc(zv)
else:
return ctx.make_mpf(zv)
else:
return zv
def ldexp(ctx, x, n):
r"""
Computes `x 2^n` efficiently. No rounding is performed.
The argument `x` must be a real floating-point number (or
possible to convert into one) and `n` must be a Python ``int``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> ldexp(1, 10)
mpf('1024.0')
>>> ldexp(1, -3)
mpf('0.125')
"""
x = ctx.convert(x)
return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
def frexp(ctx, x):
r"""
Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
`n` a Python integer, and such that `x = y 2^n`. No rounding is
performed.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> frexp(7.5)
(mpf('0.9375'), 3)
"""
x = ctx.convert(x)
y, n = libmp.mpf_frexp(x._mpf_)
return ctx.make_mpf(y), n
def fneg(ctx, x, **kwargs):
"""
Negates the number *x*, giving a floating-point result, optionally
using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
An mpmath number is returned::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fneg(2.5)
mpf('-2.5')
>>> fneg(-5+2j)
mpc(real='5.0', imag='-2.0')
Precise control over rounding is possible::
>>> x = fadd(2, 1e-100, exact=True)
>>> fneg(x)
mpf('-2.0')
>>> fneg(x, rounding='f')
mpf('-2.0000000000000004')
Negating with and without roundoff::
>>> n = 200000000000000000000001
>>> print(int(-mpf(n)))
-200000000000000016777216
>>> print(int(fneg(n)))
-200000000000000016777216
>>> print(int(fneg(n, prec=log(n,2)+1)))
-200000000000000000000001
>>> print(int(fneg(n, dps=log(n,10)+1)))
-200000000000000000000001
>>> print(int(fneg(n, prec=inf)))
-200000000000000000000001
>>> print(int(fneg(n, dps=inf)))
-200000000000000000000001
>>> print(int(fneg(n, exact=True)))
-200000000000000000000001
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
if hasattr(x, '_mpf_'):
return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fadd(ctx, x, y, **kwargs):
"""
Adds the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
The default precision is the working precision of the context.
You can specify a custom precision in bits by passing the *prec* keyword
argument, or by providing an equivalent decimal precision with the *dps*
keyword argument. If the precision is set to ``+inf``, or if the flag
*exact=True* is passed, an exact addition with no rounding is performed.
When the precision is finite, the optional *rounding* keyword argument
specifies the direction of rounding. Valid options are ``'n'`` for
nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
for down, ``'u'`` for up.
**Examples**
Using :func:`~mpmath.fadd` with precision and rounding control::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fadd(2, 1e-20)
mpf('2.0')
>>> fadd(2, 1e-20, rounding='u')
mpf('2.0000000000000004')
>>> nprint(fadd(2, 1e-20, prec=100), 25)
2.00000000000000000001
>>> nprint(fadd(2, 1e-20, dps=15), 25)
2.0
>>> nprint(fadd(2, 1e-20, dps=25), 25)
2.00000000000000000001
>>> nprint(fadd(2, 1e-20, exact=True), 25)
2.00000000000000000001
Exact addition avoids cancellation errors, enforcing familiar laws
of numbers such as `x+y-x = y`, which don't hold in floating-point
arithmetic with finite precision::
>>> x, y = mpf(2), mpf('1e-1000')
>>> print(x + y - x)
0.0
>>> print(fadd(x, y, prec=inf) - x)
1.0e-1000
>>> print(fadd(x, y, exact=True) - x)
1.0e-1000
Exact addition can be inefficient and may be impossible to perform
with large magnitude differences::
>>> fadd(1, '1e-100000000000000000000', prec=inf)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fsub(ctx, x, y, **kwargs):
"""
Subtracts the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
Using :func:`~mpmath.fsub` with precision and rounding control::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fsub(2, 1e-20)
mpf('2.0')
>>> fsub(2, 1e-20, rounding='d')
mpf('1.9999999999999998')
>>> nprint(fsub(2, 1e-20, prec=100), 25)
1.99999999999999999999
>>> nprint(fsub(2, 1e-20, dps=15), 25)
2.0
>>> nprint(fsub(2, 1e-20, dps=25), 25)
1.99999999999999999999
>>> nprint(fsub(2, 1e-20, exact=True), 25)
1.99999999999999999999
Exact subtraction avoids cancellation errors, enforcing familiar laws
of numbers such as `x-y+y = x`, which don't hold in floating-point
arithmetic with finite precision::
>>> x, y = mpf(2), mpf('1e1000')
>>> print(x - y + y)
0.0
>>> print(fsub(x, y, prec=inf) + y)
2.0
>>> print(fsub(x, y, exact=True) + y)
2.0
Exact addition can be inefficient and may be impossible to perform
with large magnitude differences::
>>> fsub(1, '1e-100000000000000000000', prec=inf)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fmul(ctx, x, y, **kwargs):
"""
Multiplies the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
The result is an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fmul(2, 5.0)
mpf('10.0')
>>> fmul(0.5j, 0.5)
mpc(real='0.0', imag='0.25')
Avoiding roundoff::
>>> x, y = 10**10+1, 10**15+1
>>> print(x*y)
10000000001000010000000001
>>> print(mpf(x) * mpf(y))
1.0000000001e+25
>>> print(int(mpf(x) * mpf(y)))
10000000001000011026399232
>>> print(int(fmul(x, y)))
10000000001000011026399232
>>> print(int(fmul(x, y, dps=25)))
10000000001000010000000001
>>> print(int(fmul(x, y, exact=True)))
10000000001000010000000001
Exact multiplication with complex numbers can be inefficient and may
be impossible to perform with large magnitude differences between
real and imaginary parts::
>>> x = 1+2j
>>> y = mpc(2, '1e-100000000000000000000')
>>> fmul(x, y)
mpc(real='2.0', imag='4.0')
>>> fmul(x, y, rounding='u')
mpc(real='2.0', imag='4.0000000000000009')
>>> fmul(x, y, exact=True)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fdiv(ctx, x, y, **kwargs):
"""
Divides the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
The result is an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fdiv(3, 2)
mpf('1.5')
>>> fdiv(2, 3)
mpf('0.66666666666666663')
>>> fdiv(2+4j, 0.5)
mpc(real='4.0', imag='8.0')
The rounding direction and precision can be controlled::
>>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits
mpf('0.6666259765625')
>>> fdiv(2, 3, rounding='d')
mpf('0.66666666666666663')
>>> fdiv(2, 3, prec=60)
mpf('0.66666666666666667')
>>> fdiv(2, 3, rounding='u')
mpf('0.66666666666666674')
Checking the error of a division by performing it at higher precision::
>>> fdiv(2, 3) - fdiv(2, 3, prec=100)
mpf('-3.7007434154172148e-17')
Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
allowed since the quotient of two floating-point numbers generally
does not have an exact floating-point representation. (In the
future this might be changed to allow the case where the division
is actually exact.)
>>> fdiv(2, 3, exact=True)
Traceback (most recent call last):
...
ValueError: division is not an exact operation
"""
prec, rounding = ctx._parse_prec(kwargs)
if not prec:
raise ValueError("division is not an exact operation")
x = ctx.convert(x)
y = ctx.convert(y)
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def nint_distance(ctx, x):
r"""
Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
(measured in bits) lost to cancellation when computing `x-n`.
>>> from mpmath import *
>>> n, d = nint_distance(5)
>>> print(n); print(d)
5
-inf
>>> n, d = nint_distance(mpf(5))
>>> print(n); print(d)
5
-inf
>>> n, d = nint_distance(mpf(5.00000001))
>>> print(n); print(d)
5
-26
>>> n, d = nint_distance(mpf(4.99999999))
>>> print(n); print(d)
5
-26
>>> n, d = nint_distance(mpc(5,10))
>>> print(n); print(d)
5
4
>>> n, d = nint_distance(mpc(5,0.000001))
>>> print(n); print(d)
5
-19
"""
typx = type(x)
if typx in int_types:
return int(x), ctx.ninf
elif typx is rational.mpq:
p, q = x._mpq_
n, r = divmod(p, q)
if 2*r >= q:
n += 1
elif not r:
return n, ctx.ninf
# log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
d = bitcount(abs(p-n*q)) - bitcount(q)
return n, d
if hasattr(x, "_mpf_"):
re = x._mpf_
im_dist = ctx.ninf
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
isign, iman, iexp, ibc = im
if iman:
im_dist = iexp + ibc
elif im == fzero:
im_dist = ctx.ninf
else:
raise ValueError("requires a finite number")
else:
x = ctx.convert(x)
if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
return ctx.nint_distance(x)
else:
raise TypeError("requires an mpf/mpc")
sign, man, exp, bc = re
mag = exp+bc
# |x| < 0.5
if mag < 0:
n = 0
re_dist = mag
elif man:
# exact integer
if exp >= 0:
n = man << exp
re_dist = ctx.ninf
# exact half-integer
elif exp == -1:
n = (man>>1)+1
re_dist = 0
else:
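                # round to nearest: t = floor(2|x|); an odd t means the
                # fractional part is >= 1/2, so round up. ``man`` becomes the
                # mantissa of |x-n|, making exp+bitcount(man) ~ log2(|x-n|)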
d = (-exp-1)
t = man >> d
if t & 1:
t += 1
man = (t<<d) - man
else:
man -= (t<<d)
n = t>>1 # int(t)>>1
re_dist = exp+bitcount(man)
if sign:
n = -n
elif re == fzero:
re_dist = ctx.ninf
n = 0
else:
raise ValueError("requires a finite number")
return n, max(re_dist, im_dist)
def fprod(ctx, factors):
r"""
Calculates a product containing a finite number of factors (for
infinite products, see :func:`~mpmath.nprod`). The factors will be
converted to mpmath numbers.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fprod([1, 2, 0.5, 7])
mpf('7.0')
"""
orig = ctx.prec
try:
v = ctx.one
for p in factors:
v *= p
finally:
ctx.prec = orig
return +v
def rand(ctx):
"""
Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
The number of randomly generated bits in the mantissa is equal
to the working precision.
"""
return ctx.make_mpf(mpf_rand(ctx._prec))
def fraction(ctx, p, q):
"""
Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
        the fraction `p/q`. The value is recomputed on demand at the
        current working precision.
>>> from mpmath import *
>>> mp.dps = 15
>>> a = fraction(1,100)
>>> b = mpf(1)/100
>>> print(a); print(b)
0.01
0.01
>>> mp.dps = 30
>>> print(a); print(b) # a will be accurate
0.01
0.0100000000000000002081668171172
>>> mp.dps = 15
"""
return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
'%s/%s' % (p, q))
def absmin(ctx, x):
return abs(ctx.convert(x))
def absmax(ctx, x):
return abs(ctx.convert(x))
def _as_points(ctx, x):
# XXX: remove this?
if hasattr(x, '_mpi_'):
a, b = x._mpi_
return [ctx.make_mpf(a), ctx.make_mpf(b)]
return x
'''
def _zetasum(ctx, s, a, b):
"""
Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
integers.
"""
a = int(a)
b = int(b)
s = ctx.convert(s)
prec, rounding = ctx._prec_rounding
if hasattr(s, '_mpf_'):
v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
elif hasattr(s, '_mpc_'):
v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
return v
'''
def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
if not (ctx.isint(a) and hasattr(s, "_mpc_")):
raise NotImplementedError
a = int(a)
prec = ctx._prec
xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
xs = [ctx.make_mpc(x) for x in xs]
ys = [ctx.make_mpc(y) for y in ys]
return xs, ys
class PrecisionManager:
def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
self.ctx = ctx
self.precfun = precfun
self.dpsfun = dpsfun
self.normalize_output = normalize_output
def __call__(self, f):
def g(*args, **kwargs):
orig = self.ctx.prec
try:
if self.precfun:
self.ctx.prec = self.precfun(self.ctx.prec)
else:
self.ctx.dps = self.dpsfun(self.ctx.dps)
if self.normalize_output:
v = f(*args, **kwargs)
if type(v) is tuple:
return tuple([+a for a in v])
return +v
else:
return f(*args, **kwargs)
finally:
self.ctx.prec = orig
g.__name__ = f.__name__
g.__doc__ = f.__doc__
return g
def __enter__(self):
self.origp = self.ctx.prec
if self.precfun:
self.ctx.prec = self.precfun(self.ctx.prec)
else:
self.ctx.dps = self.dpsfun(self.ctx.dps)
def __exit__(self, exc_type, exc_val, exc_tb):
self.ctx.prec = self.origp
return False
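# Usage sketch (illustrative): a PrecisionManager, as returned by extraprec,
# extradps, workprec and workdps, acts both as a context manager and as a
# decorator:
#
#     with mp.extraprec(20):
#         v = mp.exp(mp.pi)             # evaluated with 20 extra bits
#     sqrt50 = mp.workdps(50)(mp.sqrt)  # sqrt evaluated at a fixed 50 dps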
if __name__ == '__main__':
import doctest
doctest.testmod()

# ===== end of file: ctx_mp.py =====
# package: AltAnalyze
# path: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/ctx_mp.py

from .functions import defun, defun_wrapped
@defun
def qp(ctx, a, q=None, n=None, **kwargs):
r"""
Evaluates the q-Pochhammer symbol (or q-rising factorial)
.. math ::
(a; q)_n = \prod_{k=0}^{n-1} (1-a q^k)
where `n = \infty` is permitted if `|q| < 1`. Called with two arguments,
``qp(a,q)`` computes `(a;q)_{\infty}`; with a single argument, ``qp(q)``
computes `(q;q)_{\infty}`. The special case
.. math ::
\phi(q) = (q; q)_{\infty} = \prod_{k=1}^{\infty} (1-q^k) =
\sum_{k=-\infty}^{\infty} (-1)^k q^{(3k^2-k)/2}
    is also known as the Euler function, or (up to a factor `q^{-1/24}`)
    the Dedekind eta function.
**Examples**
If `n` is a positive integer, the function amounts to a finite product::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qp(2,3,5)
-725305.0
>>> fprod(1-2*3**k for k in range(5))
-725305.0
>>> qp(2,3,0)
1.0
Complex arguments are allowed::
>>> qp(2-1j, 0.75j)
(0.4628842231660149089976379 + 4.481821753552703090628793j)
The regular Pochhammer symbol `(a)_n` is obtained in the
following limit as `q \to 1`::
>>> a, n = 4, 7
>>> limit(lambda q: qp(q**a,q,n) / (1-q)**n, 1)
604800.0
>>> rf(a,n)
604800.0
The Taylor series of the reciprocal Euler function gives
the partition function `P(n)`, i.e. the number of ways of writing
`n` as a sum of positive integers::
>>> taylor(lambda q: 1/qp(q), 0, 10)
[1.0, 1.0, 2.0, 3.0, 5.0, 7.0, 11.0, 15.0, 22.0, 30.0, 42.0]
Special values include::
>>> qp(0)
1.0
>>> findroot(diffun(qp), -0.4) # location of maximum
-0.4112484791779547734440257
>>> qp(_)
1.228348867038575112586878
The q-Pochhammer symbol is related to the Jacobi theta functions.
For example, the following identity holds::
>>> q = mpf(0.5) # arbitrary
>>> qp(q)
0.2887880950866024212788997
>>> root(3,-2)*root(q,-24)*jtheta(2,pi/6,root(q,6))
0.2887880950866024212788997
"""
a = ctx.convert(a)
if n is None:
n = ctx.inf
else:
n = ctx.convert(n)
assert n >= 0
if q is None:
q = a
else:
q = ctx.convert(q)
if n == 0:
return ctx.one + 0*(a+q)
infinite = (n == ctx.inf)
same = (a == q)
if infinite:
if abs(q) >= 1:
if same and (q == -1 or q == 1):
return ctx.zero * q
raise ValueError("q-function only defined for |q| < 1")
elif q == 0:
return ctx.one - a
maxterms = kwargs.get('maxterms', 50*ctx.prec)
if infinite and same:
# Euler's pentagonal theorem
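            # sum over k of (-1)^k q^(k(3k-1)/2): x1 and x2 track the two
            # pentagonal powers q^(k(3k-1)/2) and q^(k(3k+1)/2) incrementally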
def terms():
t = 1
yield t
k = 1
x1 = q
x2 = q**2
while 1:
yield (-1)**k * x1
yield (-1)**k * x2
x1 *= q**(3*k+1)
x2 *= q**(3*k+2)
k += 1
if k > maxterms:
raise ctx.NoConvergence
return ctx.sum_accurately(terms)
# return ctx.nprod(lambda k: 1-a*q**k, [0,n-1])
def factors():
k = 0
r = ctx.one
while 1:
yield 1 - a*r
r *= q
k += 1
if k >= n:
                    return  # end of product (PEP 479: raising StopIteration in a generator is an error)
if k > maxterms:
raise ctx.NoConvergence
return ctx.mul_accurately(factors)
@defun_wrapped
def qgamma(ctx, z, q, **kwargs):
r"""
Evaluates the q-gamma function
.. math ::
\Gamma_q(z) = \frac{(q; q)_{\infty}}{(q^z; q)_{\infty}} (1-q)^{1-z}.
**Examples**
Evaluation for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qgamma(4,0.75)
4.046875
>>> qgamma(6,6)
121226245.0
>>> qgamma(3+4j, 0.5j)
(0.1663082382255199834630088 + 0.01952474576025952984418217j)
The q-gamma function satisfies a functional equation similar
to that of the ordinary gamma function::
>>> q = mpf(0.25)
>>> z = mpf(2.5)
>>> qgamma(z+1,q)
1.428277424823760954685912
>>> (1-q**z)/(1-q)*qgamma(z,q)
1.428277424823760954685912
"""
if abs(q) > 1:
return ctx.qgamma(z,1/q)*q**((z-2)*(z-1)*0.5)
return ctx.qp(q, q, None, **kwargs) / \
ctx.qp(q**z, q, None, **kwargs) * (1-q)**(1-z)
@defun_wrapped
def qfac(ctx, z, q, **kwargs):
r"""
Evaluates the q-factorial,
.. math ::
[n]_q! = (1+q)(1+q+q^2)\cdots(1+q+\cdots+q^{n-1})
or more generally
.. math ::
[z]_q! = \frac{(q;q)_z}{(1-q)^z}.
**Examples**
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qfac(0,0)
1.0
>>> qfac(4,3)
2080.0
>>> qfac(5,6)
121226245.0
>>> qfac(1+1j, 2+1j)
(0.4370556551322672478613695 + 0.2609739839216039203708921j)
"""
if ctx.isint(z) and ctx._re(z) > 0:
n = int(ctx._re(z))
return ctx.qp(q, q, n, **kwargs) / (1-q)**n
return ctx.qgamma(z+1, q, **kwargs)
@defun
def qhyper(ctx, a_s, b_s, q, z, **kwargs):
r"""
Evaluates the basic hypergeometric series or hypergeometric q-series
.. math ::
\,_r\phi_s \left[\begin{matrix}
a_1 & a_2 & \ldots & a_r \\
b_1 & b_2 & \ldots & b_s
\end{matrix} ; q,z \right] =
\sum_{n=0}^\infty
        \frac{(a_1;q)_n \cdots (a_r;q)_n}
        {(b_1;q)_n \cdots (b_s;q)_n}
\left((-1)^n q^{n\choose 2}\right)^{1+s-r}
\frac{z^n}{(q;q)_n}
where `(a;q)_n` denotes the q-Pochhammer symbol (see :func:`~mpmath.qp`).
**Examples**
Evaluation works for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qhyper([0.5], [2.25], 0.25, 4)
-0.1975849091263356009534385
>>> qhyper([0.5], [2.25], 0.25-0.25j, 4)
(2.806330244925716649839237 + 3.568997623337943121769938j)
>>> qhyper([1+j], [2,3+0.5j], 0.25, 3+4j)
(9.112885171773400017270226 - 1.272756997166375050700388j)
Comparing with a summation of the defining series, using
:func:`~mpmath.nsum`::
>>> b, q, z = 3, 0.25, 0.5
>>> qhyper([], [b], q, z)
0.6221136748254495583228324
>>> nsum(lambda n: z**n / qp(q,q,n)/qp(b,q,n) * q**(n*(n-1)), [0,inf])
0.6221136748254495583228324
"""
#a_s = [ctx._convert_param(a)[0] for a in a_s]
#b_s = [ctx._convert_param(b)[0] for b in b_s]
#q = ctx._convert_param(q)[0]
a_s = [ctx.convert(a) for a in a_s]
b_s = [ctx.convert(b) for b in b_s]
q = ctx.convert(q)
z = ctx.convert(z)
r = len(a_s)
s = len(b_s)
d = 1+s-r
maxterms = kwargs.get('maxterms', 50*ctx.prec)
def terms():
t = ctx.one
yield t
qk = 1
k = 0
x = 1
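        # x accumulates ((-1)^n q^(n(n-1)/2))**d with d = 1+s-r, the extra
        # factor in the series; qk holds q^k for the Pochhammer-type updates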
while 1:
for a in a_s:
p = 1 - a*qk
t *= p
for b in b_s:
p = 1 - b*qk
if not p:
raise ValueError
t /= p
t *= z
x *= (-1)**d * qk ** d
qk *= q
t /= (1 - qk)
k += 1
yield t * x
if k > maxterms:
raise ctx.NoConvergence
return ctx.sum_accurately(terms)

# ===== end of file: qfunctions.py =====
# package: AltAnalyze
# path: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/qfunctions.py

r"""
Elliptic functions historically comprise the elliptic integrals
and their inverses, and originate from the problem of computing the
arc length of an ellipse. From a more modern point of view,
an elliptic function is defined as a doubly periodic function, i.e.
a function which satisfies
.. math ::
f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
for some half-periods `\omega_1, \omega_2` with
`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
functions are the Jacobi elliptic functions. More broadly, this section
includes quasi-doubly periodic functions (such as the Jacobi theta
functions) and other functions useful in the study of elliptic functions.
Many different conventions for the arguments of
elliptic functions are in use. It is even standard to use
different parameterizations for different functions in the same
text or software (and mpmath is no exception).
The usual parameters are the elliptic nome `q`, which usually
must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
complex number); the elliptic modulus `k` (an arbitrary complex
number); and the half-period ratio `\tau`, which usually must
satisfy `\mathrm{Im}[\tau] > 0`.
These quantities can be expressed in terms of each other
using the following relations:
.. math ::
m = k^2
.. math ::
    \tau = i \frac{K(1-m)}{K(m)}
.. math ::
q = e^{i \pi \tau}
.. math ::
    k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
In addition, an alternative definition is used for the nome in
number theory, which we here denote by q-bar:
.. math ::
\bar{q} = q^2 = e^{2 i \pi \tau}
For convenience, mpmath provides functions to convert
between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
**References**
1. [AbramowitzStegun]_
2. [WhittakerWatson]_
"""
from .functions import defun, defun_wrapped
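# Internal helper (undecorated): computes the elliptic nome
# q = exp(-pi*K(1-m)/K(m)) from the parameter m, with special cases
# for m = 0, 1, nan and +/-infinity.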
def nome(ctx, m):
m = ctx.convert(m)
if not m:
return m
if m == ctx.one:
return m
if ctx.isnan(m):
return m
if ctx.isinf(m):
if m == ctx.ninf:
return type(m)(-1)
else:
return ctx.mpc(-1)
a = ctx.ellipk(ctx.one-m)
b = ctx.ellipk(m)
v = ctx.exp(-ctx.pi*a/b)
if not ctx._im(m) and ctx._re(m) < 1:
if ctx._is_real_type(m):
return v.real
else:
return v.real + 0j
elif m == 2:
v = ctx.mpc(0, v.imag)
return v
@defun_wrapped
def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qfrom(q=0.25)
0.25
>>> qfrom(m=mfrom(q=0.25))
0.25
>>> qfrom(k=kfrom(q=0.25))
0.25
>>> qfrom(tau=taufrom(q=0.25))
(0.25 + 0.0j)
>>> qfrom(qbar=qbarfrom(q=0.25))
0.25
"""
if q is not None:
return ctx.convert(q)
if m is not None:
return nome(ctx, m)
if k is not None:
return nome(ctx, ctx.convert(k)**2)
if tau is not None:
return ctx.expjpi(tau)
if qbar is not None:
return ctx.sqrt(qbar)
@defun_wrapped
def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the number-theoretic nome `\bar q`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> qbarfrom(qbar=0.25)
0.25
>>> qbarfrom(q=qfrom(qbar=0.25))
0.25
>>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
0.25
>>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
0.25
>>> qbarfrom(tau=taufrom(qbar=0.25))
(0.25 + 0.0j)
"""
if qbar is not None:
return ctx.convert(qbar)
if q is not None:
return ctx.convert(q) ** 2
if m is not None:
return nome(ctx, m) ** 2
if k is not None:
return nome(ctx, ctx.convert(k)**2) ** 2
if tau is not None:
return ctx.expjpi(2*tau)
@defun_wrapped
def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic half-period ratio `\tau`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> taufrom(tau=0.5j)
(0.0 + 0.5j)
>>> taufrom(q=qfrom(tau=0.5j))
(0.0 + 0.5j)
>>> taufrom(m=mfrom(tau=0.5j))
(0.0 + 0.5j)
>>> taufrom(k=kfrom(tau=0.5j))
(0.0 + 0.5j)
>>> taufrom(qbar=qbarfrom(tau=0.5j))
(0.0 + 0.5j)
"""
if tau is not None:
return ctx.convert(tau)
if m is not None:
m = ctx.convert(m)
return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m)
if k is not None:
k = ctx.convert(k)
return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2)
if q is not None:
return ctx.log(q) / (ctx.pi*ctx.j)
if qbar is not None:
qbar = ctx.convert(qbar)
return ctx.log(qbar) / (2*ctx.pi*ctx.j)
@defun_wrapped
def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic modulus `k`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> kfrom(k=0.25)
0.25
>>> kfrom(m=mfrom(k=0.25))
0.25
>>> kfrom(q=qfrom(k=0.25))
0.25
>>> kfrom(tau=taufrom(k=0.25))
(0.25 + 0.0j)
>>> kfrom(qbar=qbarfrom(k=0.25))
0.25
As `q \to 1` and `q \to -1`, `k` rapidly approaches
`1` and `i \infty` respectively::
>>> kfrom(q=0.75)
0.9999999999999899166471767
>>> kfrom(q=-0.75)
(0.0 + 7041781.096692038332790615j)
>>> kfrom(q=1)
1
>>> kfrom(q=-1)
(0.0 + +infj)
"""
if k is not None:
return ctx.convert(k)
if m is not None:
return ctx.sqrt(m)
if tau is not None:
q = ctx.expjpi(tau)
if qbar is not None:
q = ctx.sqrt(qbar)
if q == 1:
return q
if q == -1:
return ctx.mpc(0,'inf')
return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2
@defun_wrapped
def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
r"""
Returns the elliptic parameter `m`, given any of
`q, m, k, \tau, \bar{q}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> mfrom(m=0.25)
0.25
>>> mfrom(q=qfrom(m=0.25))
0.25
>>> mfrom(k=kfrom(m=0.25))
0.25
>>> mfrom(tau=taufrom(m=0.25))
(0.25 + 0.0j)
>>> mfrom(qbar=qbarfrom(m=0.25))
0.25
As `q \to 1` and `q \to -1`, `m` rapidly approaches
`1` and `-\infty` respectively::
>>> mfrom(q=0.75)
0.9999999999999798332943533
>>> mfrom(q=-0.75)
-49586681013729.32611558353
>>> mfrom(q=1)
1.0
>>> mfrom(q=-1)
-inf
The inverse nome as a function of `q` has an integer
Taylor series expansion::
>>> taylor(lambda q: mfrom(q), 0, 7)
[0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0]
"""
if m is not None:
return m
if k is not None:
return k**2
if tau is not None:
q = ctx.expjpi(tau)
if qbar is not None:
q = ctx.sqrt(qbar)
if q == 1:
return ctx.convert(q)
if q == -1:
return q*ctx.inf
v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
if ctx._is_real_type(q) and q < 0:
v = v.real
return v
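# Each jacobi_spec entry encodes one Jacobi elliptic function pq(u,m) as a
# ratio of theta functions, in the form (numerator theta constants,
# denominator theta constants, numerator theta functions of t, denominator
# theta functions of t, m->0 limit, m->1 limit), with t = u/jtheta(3,0,q)**2.
# For example, 'sn' denotes (theta_3/theta_2)*(theta_1(t)/theta_4(t)), with
# the degenerate forms sn(u,0) = sin(u) and sn(u,1) = tanh(u).  The trivial
# quotients 'cc', 'ss', 'nn', 'dd' are identically 1.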
jacobi_spec = {
'sn' : ([3],[2],[1],[4], 'sin', 'tanh'),
'cn' : ([4],[2],[2],[4], 'cos', 'sech'),
'dn' : ([4],[3],[3],[4], '1', 'sech'),
'ns' : ([2],[3],[4],[1], 'csc', 'coth'),
'nc' : ([2],[4],[4],[2], 'sec', 'cosh'),
'nd' : ([3],[4],[4],[3], '1', 'cosh'),
'sc' : ([3],[4],[1],[2], 'tan', 'sinh'),
'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'),
'cd' : ([3],[2],[2],[3], 'cos', '1'),
'cs' : ([4],[3],[2],[1], 'cot', 'csch'),
'dc' : ([2],[3],[3],[2], 'sec', '1'),
'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'),
'cc' : None,
'ss' : None,
'nn' : None,
'dd' : None
}
@defun
def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
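    r"""
    Evaluates the Jacobi elliptic function with the two-character name
    *kind* (``'sn'``, ``'cn'``, ``'dn'``, or any other quotient of
    's', 'c', 'd', 'n') at the point *u*, with the elliptic parameter
    given as any one of *m*, *q*, *k* or *tau*.  If *u* is omitted,
    a callable evaluating the named function is returned instead,
    e.g. ``sn = ellipfun('sn')``.
    """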
try:
S = jacobi_spec[kind]
except KeyError:
raise ValueError("First argument must be a two-character string "
"containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
if u is None:
def f(*args, **kwargs):
return ctx.ellipfun(kind, *args, **kwargs)
f.__name__ = kind
return f
prec = ctx.prec
try:
ctx.prec += 10
u = ctx.convert(u)
q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
if S is None:
v = ctx.one + 0*q*u
elif q == ctx.zero:
if S[4] == '1': v = ctx.one
else: v = getattr(ctx, S[4])(u)
v += 0*q*u
elif q == ctx.one:
if S[5] == '1': v = ctx.one
else: v = getattr(ctx, S[5])(u)
v += 0*q*u
else:
t = u / ctx.jtheta(3, 0, q)**2
v = ctx.one
for a in S[0]: v *= ctx.jtheta(a, 0, q)
for b in S[1]: v /= ctx.jtheta(b, 0, q)
for c in S[2]: v *= ctx.jtheta(c, t, q)
for d in S[3]: v /= ctx.jtheta(d, t, q)
finally:
ctx.prec = prec
return +v
@defun_wrapped
def kleinj(ctx, tau=None, **kwargs):
r"""
Evaluates the Klein j-invariant, which is a modular function defined for
`\tau` in the upper half-plane as
.. math ::
J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}
where `g_2` and `g_3` are the modular invariants of the Weierstrass
elliptic function,
.. math ::
g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4}
g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}.
An alternative, common notation is that of the j-function
`j(\tau) = 1728 J(\tau)`.
**Plots**
.. literalinclude :: /plots/kleinj.py
.. image :: /plots/kleinj.png
.. literalinclude :: /plots/kleinj2.py
.. image :: /plots/kleinj2.png
**Examples**
Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tau = 0.625+0.75*j
>>> kleinj(tau)
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
>>> kleinj(tau+1)
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
>>> kleinj(-1/tau)
(-0.1507492166511182267125242 + 0.07595948379084571927228946j)
The j-function has a famous Laurent series expansion in terms of the nome
`\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::
>>> mp.dps = 15
>>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
[1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]
The j-function admits exact evaluation at special algebraic points
related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163::
>>> @extraprec(10)
... def h(n):
... v = (1+sqrt(n)*j)
... if n > 2:
... v *= 0.5
... return v
...
>>> mp.dps = 25
>>> for n in [1,2,3,7,11,19,43,67,163]:
... n, chop(1728*kleinj(h(n)))
...
(1, 1728.0)
(2, 8000.0)
(3, 0.0)
(7, -3375.0)
(11, -32768.0)
(19, -884736.0)
(43, -884736000.0)
(67, -147197952000.0)
(163, -262537412640768000.0)
Also at other special points, the j-function assumes explicit
algebraic values, e.g.::
>>> chop(1728*kleinj(j*sqrt(5)))
1264538.909475140509320227
>>> identify(cbrt(_)) # note: not simplified
'((100+sqrt(13520))/2)'
>>> (50+26*sqrt(5))**3
1264538.909475140509320227
"""
q = ctx.qfrom(tau=tau, **kwargs)
t2 = ctx.jtheta(2,0,q)
t3 = ctx.jtheta(3,0,q)
t4 = ctx.jtheta(4,0,q)
P = (t2**8 + t3**8 + t4**8)**3
Q = 54*(t2*t3*t4)**8
return P/Q
def RF_calc(ctx, x, y, z, r):
if y == z: return RC_calc(ctx, x, y, r)
if x == z: return RC_calc(ctx, y, x, r)
if x == y: return RC_calc(ctx, z, x, r)
if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
return x*y*z
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
return ctx.zero
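    # Carlson's duplication algorithm: the substitution
    # (x,y,z) -> ((x+lm)/4, (y+lm)/4, (z+lm)/4) with
    # lm = sqrt(x*y) + sqrt(x*z) + sqrt(y*z) leaves R_F invariant and
    # drives the arguments toward their mean Am; once they are close
    # enough (criterion Q, derived from the tolerance r), the degree-5
    # Taylor expansion below supplies the value.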
xm,ym,zm = x,y,z
A0 = Am = (x+y+z)/3
Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
g = ctx.mpf(0.25)
pow4 = ctx.one
m = 0
while 1:
xs = ctx.sqrt(xm)
ys = ctx.sqrt(ym)
zs = ctx.sqrt(zm)
lm = xs*ys + xs*zs + ys*zs
Am1 = (Am+lm)*g
xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
if pow4 * Q < abs(Am):
break
Am = Am1
m += 1
pow4 *= g
t = pow4/Am
X = (A0-x)*t
Y = (A0-y)*t
Z = -X-Y
E2 = X*Y-Z**2
E3 = X*Y*Z
return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
def RC_calc(ctx, x, y, r, pv=True):
if not (ctx.isnormal(x) and ctx.isnormal(y)):
if ctx.isinf(x) or ctx.isinf(y):
return 1/(x*y)
if y == 0:
return ctx.inf
if x == 0:
return ctx.pi / ctx.sqrt(y) / 2
raise ValueError
# Cauchy principal value
if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
if x == y:
return 1/ctx.sqrt(x)
extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
ctx.prec += extraprec
if ctx._is_real_type(x) and ctx._is_real_type(y):
x = ctx._re(x)
y = ctx._re(y)
a = ctx.sqrt(x/y)
if x < y:
b = ctx.sqrt(y-x)
v = ctx.acos(a)/b
else:
b = ctx.sqrt(x-y)
v = ctx.acosh(a)/b
else:
sx = ctx.sqrt(x)
sy = ctx.sqrt(y)
v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
ctx.prec -= extraprec
return v
def RJ_calc(ctx, x, y, z, p, r):
if not (ctx.isnormal(x) and ctx.isnormal(y) and \
ctx.isnormal(z) and ctx.isnormal(p)):
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
return x*y*z
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
return ctx.zero
if not p:
return ctx.inf
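    # Carlson's duplication algorithm for R_J: the same argument-halving
    # scheme as in RF_calc, except that each step also contributes an
    # R_C correction term to the sum S; the degree-5 Taylor expansion of
    # the final mean Am supplies the remainder v1, giving v1 + 6*S.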
xm,ym,zm,pm = x,y,z,p
A0 = Am = (x + y + z + 2*p)/5
delta = (p-x)*(p-y)*(p-z)
Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
m = 0
g = ctx.mpf(0.25)
pow4 = ctx.one
S = 0
while 1:
sx = ctx.sqrt(xm)
sy = ctx.sqrt(ym)
sz = ctx.sqrt(zm)
sp = ctx.sqrt(pm)
lm = sx*sy + sx*sz + sy*sz
Am1 = (Am+lm)*g
xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
dm = (sp+sx) * (sp+sy) * (sp+sz)
em = delta * ctx.power(4, -3*m) / dm**2
if pow4 * Q < abs(Am):
break
T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
S += T
pow4 *= g
m += 1
Am = Am1
t = ctx.ldexp(1,-2*m) / Am
X = (A0-x)*t
Y = (A0-y)*t
Z = (A0-z)*t
P = (-X-Y-Z)/2
E2 = X*Y + X*Z + Y*Z - 3*P**2
E3 = X*Y*Z + 2*E2*P + 4*P**3
E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
E5 = X*Y*Z*P**2
P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
Q = 24024
v1 = g**m * ctx.power(Am, -1.5) * P/Q
v2 = 6*S
return v1 + v2
@defun
def elliprf(ctx, x, y, z):
r"""
Evaluates the Carlson symmetric elliptic integral of the first kind
.. math ::
R_F(x,y,z) = \frac{1}{2}
\int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}
which is defined for `x,y,z \notin (-\infty,0)`, and with
at most one of `x,y,z` being zero.
For real `x,y,z \ge 0`, the principal square root is taken in the integrand.
For complex `x,y,z`, the principal square root is taken as `t \to \infty`
and as `t \to 0` non-principal branches are chosen as necessary so as to
make the integrand continuous.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprf(0,1,1); pi/2
1.570796326794896619231322
1.570796326794896619231322
>>> elliprf(0,1,inf)
0.0
>>> elliprf(1,1,1)
1.0
>>> elliprf(2,2,2)**2
0.5
>>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0)
+inf
+inf
+inf
+inf
Representing complete elliptic integrals in terms of `R_F`::
>>> m = mpf(0.75)
>>> ellipk(m); elliprf(0,1-m,1)
2.156515647499643235438675
2.156515647499643235438675
>>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3
1.211056027568459524803563
1.211056027568459524803563
Some symmetries and argument transformations::
>>> x,y,z = 2,3,4
>>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x)
0.5840828416771517066928492
0.5840828416771517066928492
0.5840828416771517066928492
>>> k = mpf(100000)
>>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z)
0.001847032121923321253219284
0.001847032121923321253219284
>>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x)
>>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l)
0.5840828416771517066928492
0.5840828416771517066928492
>>> elliprf((x+l)/4,(y+l)/4,(z+l)/4)
0.5840828416771517066928492
Comparing with numerical integration::
>>> x,y,z = 2,3,4
>>> elliprf(x,y,z)
0.5840828416771517066928492
>>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5)
>>> q = extradps(25)(quad)
>>> q(f, [0,inf])
0.5840828416771517066928492
With the following arguments, the square root in the integrand becomes
discontinuous at `t = 1/2` if the principal branch is used. To obtain
the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}`
on `t \in (0, 1/2)`::
>>> x,y,z = j-1,j,0
>>> elliprf(x,y,z)
(0.7961258658423391329305694 - 1.213856669836495986430094j)
>>> -q(f, [0,0.5]) + q(f, [0.5,inf])
(0.7961258658423391329305694 - 1.213856669836495986430094j)
The so-called *first lemniscate constant*, a transcendental number::
>>> elliprf(0,1,2)
1.31102877714605990523242
>>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1])
1.31102877714605990523242
>>> gamma('1/4')**2/(4*sqrt(2*pi))
1.31102877714605990523242
**References**
1. [Carlson]_
2. [DLMF]_ Chapter 19. Elliptic Integrals
"""
x = ctx.convert(x)
y = ctx.convert(y)
z = ctx.convert(z)
prec = ctx.prec
try:
ctx.prec += 20
tol = ctx.eps * 2**10
v = RF_calc(ctx, x, y, z, tol)
finally:
ctx.prec = prec
return +v
@defun
def elliprc(ctx, x, y, pv=True):
r"""
Evaluates the degenerate Carlson symmetric elliptic integral
of the first kind
.. math ::
R_C(x,y) = R_F(x,y,y) =
\frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.
If `y \in (-\infty,0)`, either a value defined by continuity,
or with *pv=True* the Cauchy principal value, can be computed.
If `x \ge 0, y > 0`, the value can be expressed in terms of
elementary functions as
.. math ::
R_C(x,y) =
\begin{cases}
\dfrac{1}{\sqrt{y-x}}
\cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\
\dfrac{1}{\sqrt{y}}, & x = y \\
\dfrac{1}{\sqrt{x-y}}
\cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\
\end{cases}.
**Examples**
Some special values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprc(1,2)*4; elliprc(0,1)*2; +pi
3.141592653589793238462643
3.141592653589793238462643
3.141592653589793238462643
>>> elliprc(1,0)
+inf
>>> elliprc(5,5)**2
0.2
>>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf)
0.0
0.0
0.0
Comparing with the elementary closed-form solution::
>>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3'))
2.041630778983498390751238
2.041630778983498390751238
>>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5'))
1.875180765206547065111085
1.875180765206547065111085
Comparing with numerical integration::
>>> q = extradps(25)(quad)
>>> elliprc(2, -3, pv=True)
0.3333969101113672670749334
>>> elliprc(2, -3, pv=False)
(0.3333969101113672670749334 + 0.7024814731040726393156375j)
>>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf])
(0.3333969101113672670749334 + 0.7024814731040726393156375j)
"""
x = ctx.convert(x)
y = ctx.convert(y)
prec = ctx.prec
try:
ctx.prec += 20
tol = ctx.eps * 2**10
v = RC_calc(ctx, x, y, tol, pv)
finally:
ctx.prec = prec
return +v
@defun
def elliprj(ctx, x, y, z, p):
r"""
Evaluates the Carlson symmetric elliptic integral of the third kind
.. math ::
R_J(x,y,z,p) = \frac{3}{2}
\int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.
Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand
is defined so as to be continuous along the path of integration for
complex values of the arguments.
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprj(1,1,1,1)
1.0
>>> elliprj(2,2,2,2); 1/(2*sqrt(2))
0.3535533905932737622004222
0.3535533905932737622004222
>>> elliprj(0,1,2,2)
1.067937989667395702268688
>>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi))
1.067937989667395702268688
>>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4
1.380226776765915172432054
1.380226776765915172432054
>>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0)
+inf
+inf
+inf
>>> elliprj(1,inf,1,0); elliprj(1,1,1,inf)
0.0
0.0
>>> chop(elliprj(1+j, 1-j, 1, 1))
0.8505007163686739432927844
Scale transformation::
>>> x,y,z,p = 2,3,4,5
>>> k = mpf(100000)
>>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p)
4.521291677592745527851168e-9
4.521291677592745527851168e-9
Comparing with numerical integration::
>>> elliprj(1,2,3,4)
0.2398480997495677621758617
>>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3)))
>>> 1.5*quad(f, [0,inf])
0.2398480997495677621758617
>>> elliprj(1,2+1j,3,4-2j)
(0.216888906014633498739952 + 0.04081912627366673332369512j)
>>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3)))
>>> 1.5*quad(f, [0,inf])
(0.216888906014633498739952 + 0.04081912627366673332369511j)
"""
x = ctx.convert(x)
y = ctx.convert(y)
z = ctx.convert(z)
p = ctx.convert(p)
prec = ctx.prec
try:
ctx.prec += 20
tol = ctx.eps * 2**10
v = RJ_calc(ctx, x, y, z, p, tol)
finally:
ctx.prec = prec
return +v
@defun
def elliprd(ctx, x, y, z):
r"""
Evaluates the degenerate Carlson symmetric elliptic integral
of the third kind or Carlson elliptic integral of the
second kind `R_D(x,y,z) = R_J(x,y,z,z)`.
See :func:`~mpmath.elliprj` for additional information.
**Examples**
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprd(1,2,3)
0.2904602810289906442326534
>>> elliprj(1,2,3,3)
0.2904602810289906442326534
The so-called *second lemniscate constant*, a transcendental number::
>>> elliprd(0,2,1)/3
0.5990701173677961037199612
>>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1])
0.5990701173677961037199612
>>> gamma('3/4')**2/sqrt(2*pi)
0.5990701173677961037199612
"""
return ctx.elliprj(x,y,z,z)
@defun
def elliprg(ctx, x, y, z):
r"""
Evaluates the Carlson completely symmetric elliptic integral
of the second kind
.. math ::
R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
\frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
\left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.
**Examples**
Evaluation for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> elliprg(0,1,1)*4; +pi
3.141592653589793238462643
3.141592653589793238462643
>>> elliprg(0,0.5,1)
0.6753219405238377512600874
>>> chop(elliprg(1+j, 1-j, 2))
1.172431327676416604532822
A double integral that can be evaluated in terms of `R_G`::
>>> x,y,z = 2,3,4
>>> def f(t,u):
... st = fp.sin(t); ct = fp.cos(t)
... su = fp.sin(u); cu = fp.cos(u)
... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st
...
>>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13)
1.725503028069
>>> nprint(elliprg(x,y,z), 13)
1.725503028069
"""
x = ctx.convert(x)
y = ctx.convert(y)
z = ctx.convert(z)
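    # R_G is symmetric in x, y, z; rotate a nonzero argument into the
    # z slot so that the sqrt(x*y/z) term in the expansion below is
    # well-defined (the all-zero case is handled separately).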
if not z: x, z = z, x
if not z: y, z = x, y
if not z: return ctx.inf
def terms():
T1 = 0.5*z*ctx.elliprf(x,y,z)
T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
T3 = 0.5*ctx.sqrt(x*y/z)
return T1,T2,T3
return ctx.sum_accurately(terms)
@defun_wrapped
def ellipf(ctx, phi, m):
r"""
Evaluates the Legendre incomplete elliptic integral of the first kind
.. math ::
F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}
or equivalently
.. math ::
        F(\phi,m) = \int_0^{\sin \phi}
\frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}.
The function reduces to a complete elliptic integral of the first kind
(see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is,
.. math ::
F\left(\frac{\pi}{2}, m\right) = K(m).
In the defining integral, it is assumed that the principal branch
of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
the function extends quasi-periodically as
.. math ::
F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.
**Plots**
.. literalinclude :: /plots/ellipf.py
.. image :: /plots/ellipf.png
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipf(0,1)
0.0
>>> ellipf(0,0)
0.0
>>> ellipf(1,0); ellipf(2+3j,0)
1.0
(2.0 + 3.0j)
>>> ellipf(1,1); log(sec(1)+tan(1))
1.226191170883517070813061
1.226191170883517070813061
>>> ellipf(pi/2, -0.5); ellipk(-0.5)
1.415737208425956198892166
1.415737208425956198892166
>>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1)
+inf
+inf
>>> ellipf(1.5, 1)
3.340677542798311003320813
Comparing with numerical integration::
>>> z,m = 0.5, 1.25
>>> ellipf(z,m)
0.5287219202206327872978255
>>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z])
0.5287219202206327872978255
The arguments may be complex numbers::
>>> ellipf(3j, 0.5)
(0.0 + 1.713602407841590234804143j)
>>> ellipf(3+4j, 5-6j)
(1.269131241950351323305741 - 0.3561052815014558335412538j)
>>> z,m = 2+3j, 1.25
>>> k = 1011
>>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m)
(4086.184383622179764082821 - 3003.003538923749396546871j)
(4086.184383622179764082821 - 3003.003538923749396546871j)
For `|\Re(z)| < \pi/2`, the function can be expressed as a
hypergeometric series of two variables
(see :func:`~mpmath.appellf1`)::
>>> z,m = 0.5, 0.25
>>> ellipf(z,m)
0.5050887275786480788831083
>>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2)
0.5050887275786480788831083
"""
z = phi
if not (ctx.isnormal(z) and ctx.isnormal(m)):
if m == 0:
return z + m
if z == 0:
return z * m
if m == ctx.inf or m == ctx.ninf: return z/m
raise ValueError
x = z.real
ctx.prec += max(0, ctx.mag(x))
pi = +ctx.pi
away = abs(x) > pi/2
if m == 1:
if away:
return ctx.inf
if away:
d = ctx.nint(x/pi)
z = z-pi*d
P = 2*d*ctx.ellipk(m)
else:
P = 0
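    # Standard reduction to Carlson symmetric form:
    # F(phi,m) = sin(phi)*R_F(cos(phi)**2, 1 - m*sin(phi)**2, 1),
    # plus the quasi-period contribution P computed above.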
c, s = ctx.cos_sin(z)
return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P
@defun_wrapped
def ellipe(ctx, *args):
r"""
Called with a single argument `m`, evaluates the Legendre complete
elliptic integral of the second kind, `E(m)`, defined by
.. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\,
\frac{\pi}{2}
\,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right).
Called with two arguments `\phi, m`, evaluates the incomplete elliptic
integral of the second kind
.. math ::
E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt =
        \int_0^{\sin \phi}
\frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt.
The incomplete integral reduces to a complete integral when
`\phi = \frac{\pi}{2}`; that is,
.. math ::
E\left(\frac{\pi}{2}, m\right) = E(m).
In the defining integral, it is assumed that the principal branch
of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
the function extends quasi-periodically as
.. math ::
        E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}.
**Plots**
.. literalinclude :: /plots/ellipe.py
.. image :: /plots/ellipe.png
**Examples for the complete integral**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipe(0)
1.570796326794896619231322
>>> ellipe(1)
1.0
>>> ellipe(-1)
1.910098894513856008952381
>>> ellipe(2)
(0.5990701173677961037199612 + 0.5990701173677961037199612j)
>>> ellipe(inf)
(0.0 + +infj)
>>> ellipe(-inf)
+inf
Verifying the defining integral and hypergeometric
representation::
>>> ellipe(0.5)
1.350643881047675502520175
>>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
1.350643881047675502520175
>>> pi/2*hyp2f1(0.5,-0.5,1,0.5)
1.350643881047675502520175
Evaluation is supported for arbitrary complex `m`::
>>> ellipe(0.5+0.25j)
(1.360868682163129682716687 - 0.1238733442561786843557315j)
>>> ellipe(3+4j)
(1.499553520933346954333612 - 1.577879007912758274533309j)
A definite integral::
>>> quad(ellipe, [0,1])
1.333333333333333333333333
**Examples for the incomplete integral**
Basic values and limits::
>>> ellipe(0,1)
0.0
>>> ellipe(0,0)
0.0
>>> ellipe(1,0)
1.0
>>> ellipe(2+3j,0)
(2.0 + 3.0j)
>>> ellipe(1,1); sin(1)
0.8414709848078965066525023
0.8414709848078965066525023
>>> ellipe(pi/2, -0.5); ellipe(-0.5)
1.751771275694817862026502
1.751771275694817862026502
>>> ellipe(pi/2, 1); ellipe(-pi/2, 1)
1.0
-1.0
>>> ellipe(1.5, 1)
0.9974949866040544309417234
Comparing with numerical integration::
>>> z,m = 0.5, 1.25
>>> ellipe(z,m)
0.4740152182652628394264449
>>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z])
0.4740152182652628394264449
The arguments may be complex numbers::
>>> ellipe(3j, 0.5)
(0.0 + 7.551991234890371873502105j)
>>> ellipe(3+4j, 5-6j)
(24.15299022574220502424466 + 75.2503670480325997418156j)
>>> k = 35
>>> z,m = 2+3j, 1.25
>>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m)
(48.30138799412005235090766 + 17.47255216721987688224357j)
(48.30138799412005235090766 + 17.47255216721987688224357j)
For `|\Re(z)| < \pi/2`, the function can be expressed as a
hypergeometric series of two variables
(see :func:`~mpmath.appellf1`)::
>>> z,m = 0.5, 0.25
>>> ellipe(z,m)
0.4950017030164151928870375
>>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
0.4950017030164151928870376
"""
if len(args) == 1:
return ctx._ellipe(args[0])
else:
phi, m = args
z = phi
if not (ctx.isnormal(z) and ctx.isnormal(m)):
if m == 0:
return z + m
if z == 0:
return z * m
if m == ctx.inf or m == ctx.ninf:
return ctx.inf
raise ValueError
x = z.real
ctx.prec += max(0, ctx.mag(x))
pi = +ctx.pi
away = abs(x) > pi/2
if away:
d = ctx.nint(x/pi)
z = z-pi*d
P = 2*d*ctx.ellipe(m)
else:
P = 0
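        # Standard reduction to Carlson symmetric form:
        # E(phi,m) = sin(phi)*R_F(c**2, 1-m*s**2, 1)
        #            - (m/3)*sin(phi)**3*R_D(c**2, 1-m*s**2, 1),
        # with the two terms combined by sum_accurately to detect
        # cancellation.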
def terms():
c, s = ctx.cos_sin(z)
x = c**2
y = 1-m*s**2
RF = ctx.elliprf(x, y, 1)
RD = ctx.elliprd(x, y, 1)
return s*RF, -m*s**3*RD/3
return ctx.sum_accurately(terms) + P
@defun_wrapped
def ellippi(ctx, *args):
r"""
Called with three arguments `n, \phi, m`, evaluates the Legendre
incomplete elliptic integral of the third kind
.. math ::
\Pi(n; \phi, m) = \int_0^{\phi}
\frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
\int_0^{\sin \phi}
\frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.
Called with two arguments `n, m`, evaluates the complete
elliptic integral of the third kind
`\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.
In the defining integral, it is assumed that the principal branch
of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
the function extends quasi-periodically as
.. math ::
        \Pi(n; \phi + k \pi, m) = 2 k \Pi(n, m) + \Pi(n; \phi, m), k \in \mathbb{Z}.
**Plots**
.. literalinclude :: /plots/ellippi.py
.. image :: /plots/ellippi.png
**Examples for the complete integral**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellippi(0,-5); ellipk(-5)
0.9555039270640439337379334
0.9555039270640439337379334
>>> ellippi(inf,2)
0.0
>>> ellippi(2,inf)
0.0
>>> abs(ellippi(1,5))
+inf
>>> abs(ellippi(0.25,1))
+inf
Evaluation in terms of simpler functions::
>>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
1.956616279119236207279727
1.956616279119236207279727
>>> ellippi(3,0); pi/(2*sqrt(-2))
(0.0 - 1.11072073453959156175397j)
(0.0 - 1.11072073453959156175397j)
>>> ellippi(-3,0); pi/(2*sqrt(4))
0.7853981633974483096156609
0.7853981633974483096156609
**Examples for the incomplete integral**
Basic values and limits::
>>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
1.622944760954741603710555
1.622944760954741603710555
>>> ellippi(1,0,1)
0.0
>>> ellippi(inf,0,1)
0.0
>>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
0.2513040086544925794134591
0.2513040086544925794134591
>>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
2.054332933256248668692452
2.054332933256248668692452
>>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
135.240868757890840755058
135.240868757890840755058
>>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
0.9190227391656969903987269
0.9190227391656969903987269
Complex arguments are supported::
>>> ellippi(0.5, 5+6j-2*pi, -7-8j)
(-0.3612856620076747660410167 + 0.5217735339984807829755815j)
"""
if len(args) == 2:
n, m = args
complete = True
z = phi = ctx.pi/2
else:
n, phi, m = args
complete = False
z = phi
if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
raise ValueError
if complete:
if m == 0: return ctx.pi/(2*ctx.sqrt(1-n))
if n == 0: return ctx.ellipk(m)
if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
else:
if z == 0: return z
if ctx.isinf(n): return ctx.zero
if ctx.isinf(m): return ctx.zero
if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
raise ValueError
if complete:
if m == 1: return -ctx.inf/ctx.sign(n-1)
away = False
else:
x = z.real
ctx.prec += max(0, ctx.mag(x))
pi = +ctx.pi
away = abs(x) > pi/2
if away:
d = ctx.nint(x/pi)
z = z-pi*d
P = 2*d*ctx.ellippi(n,m)
else:
P = 0
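    # Standard reduction to Carlson symmetric form:
    # Pi(n; phi, m) = sin(phi)*R_F(c**2, 1-m*s**2, 1)
    #                 + (n/3)*sin(phi)**3*R_J(c**2, 1-m*s**2, 1, 1-n*s**2),
    # with the two terms combined by sum_accurately.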
def terms():
if complete:
c, s = ctx.zero, ctx.one
else:
c, s = ctx.cos_sin(z)
x = c**2
y = 1-m*s**2
RF = ctx.elliprf(x, y, 1)
RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
return s*RF, n*s**3*RJ/3
return ctx.sum_accurately(terms) + P
# end of altanalyze/stats_scripts/mpmath/functions/elliptic.py (AltAnalyze 2.1.3.15)
from ..libmp.backend import xrange
from .functions import defun, defun_wrapped
def _check_need_perturb(ctx, terms, prec, discard_known_zeros):
perturb = recompute = False
extraprec = 0
discard = []
for term_index, term in enumerate(terms):
w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term
have_singular_nongamma_weight = False
# Avoid division by zero in leading factors (TODO:
# also check for near division by zero?)
for k, w in enumerate(w_s):
if not w:
if ctx.re(c_s[k]) <= 0 and c_s[k]:
perturb = recompute = True
have_singular_nongamma_weight = True
pole_count = [0, 0, 0]
# Check for gamma and series poles and near-poles
for data_index, data in enumerate([alpha_s, beta_s, b_s]):
for i, x in enumerate(data):
n, d = ctx.nint_distance(x)
# Poles
if n > 0:
continue
if d == ctx.ninf:
# OK if we have a polynomial
# ------------------------------
ok = False
if data_index == 2:
for u in a_s:
if ctx.isnpint(u) and u >= int(n):
ok = True
break
if ok:
continue
pole_count[data_index] += 1
# ------------------------------
#perturb = recompute = True
#return perturb, recompute, extraprec
elif d < -4:
extraprec += -d
recompute = True
if discard_known_zeros and pole_count[1] > pole_count[0] + pole_count[2] \
and not have_singular_nongamma_weight:
discard.append(term_index)
elif sum(pole_count):
perturb = recompute = True
return perturb, recompute, extraprec, discard
_hypercomb_msg = """
hypercomb() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. The function value may be zero or
infinite; try passing zeroprec=N or infprec=M to bound finite values between
2^(-N) and 2^M. Otherwise try a higher maxprec or maxterms.
"""
@defun
def hypercomb(ctx, function, params=[], discard_known_zeros=True, **kwargs):
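    # Each term returned by function(*params) is a tuple
    # (w_s, c_s, alpha_s, beta_s, a_s, b_s, z), evaluated below as
    #   prod(w**c) * prod(gamma(alpha)) * prod(1/gamma(beta)) * hyper(a_s, b_s, z).
    # The main loop raises the working precision, and perturbs parameters
    # sitting at poles, until the combined sum is accurate to ~orig bits.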
orig = ctx.prec
sumvalue = ctx.zero
dist = ctx.nint_distance
ninf = ctx.ninf
orig_params = params[:]
verbose = kwargs.get('verbose', False)
maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(orig))
kwargs['maxprec'] = maxprec # For calls to hypsum
zeroprec = kwargs.get('zeroprec')
infprec = kwargs.get('infprec')
perturbed_reference_value = None
hextra = 0
try:
while 1:
ctx.prec += 10
if ctx.prec > maxprec:
raise ValueError(_hypercomb_msg % (orig, ctx.prec))
orig2 = ctx.prec
params = orig_params[:]
terms = function(*params)
if verbose:
print()
print("ENTERING hypercomb main loop")
print("prec =", ctx.prec)
print("hextra", hextra)
perturb, recompute, extraprec, discard = \
_check_need_perturb(ctx, terms, orig, discard_known_zeros)
ctx.prec += extraprec
if perturb:
if "hmag" in kwargs:
hmag = kwargs["hmag"]
elif ctx._fixed_precision:
hmag = int(ctx.prec*0.3)
else:
hmag = orig + 10 + hextra
h = ctx.ldexp(ctx.one, -hmag)
ctx.prec = orig2 + 10 + hmag + 10
for k in range(len(params)):
params[k] += h
# Heuristically ensure that the perturbations
# are "independent" so that two perturbations
# don't accidentally cancel each other out
# in a subtraction.
h += h/(k+1)
if recompute:
terms = function(*params)
if discard_known_zeros:
terms = [term for (i, term) in enumerate(terms) if i not in discard]
if not terms:
return ctx.zero
evaluated_terms = []
for term_index, term_data in enumerate(terms):
w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term_data
if verbose:
print()
print(" Evaluating term %i/%i : %iF%i" % \
(term_index+1, len(terms), len(a_s), len(b_s)))
print(" powers", ctx.nstr(w_s), ctx.nstr(c_s))
print(" gamma", ctx.nstr(alpha_s), ctx.nstr(beta_s))
print(" hyper", ctx.nstr(a_s), ctx.nstr(b_s))
print(" z", ctx.nstr(z))
#v = ctx.hyper(a_s, b_s, z, **kwargs)
#for a in alpha_s: v *= ctx.gamma(a)
#for b in beta_s: v *= ctx.rgamma(b)
#for w, c in zip(w_s, c_s): v *= ctx.power(w, c)
v = ctx.fprod([ctx.hyper(a_s, b_s, z, **kwargs)] + \
[ctx.gamma(a) for a in alpha_s] + \
[ctx.rgamma(b) for b in beta_s] + \
[ctx.power(w,c) for (w,c) in zip(w_s,c_s)])
if verbose:
print(" Value:", v)
evaluated_terms.append(v)
if len(terms) == 1 and (not perturb):
sumvalue = evaluated_terms[0]
break
if ctx._fixed_precision:
sumvalue = ctx.fsum(evaluated_terms)
break
sumvalue = ctx.fsum(evaluated_terms)
term_magnitudes = [ctx.mag(x) for x in evaluated_terms]
max_magnitude = max(term_magnitudes)
sum_magnitude = ctx.mag(sumvalue)
cancellation = max_magnitude - sum_magnitude
if verbose:
print()
print(" Cancellation:", cancellation, "bits")
print(" Increased precision:", ctx.prec - orig, "bits")
precision_ok = cancellation < ctx.prec - orig
if zeroprec is None:
zero_ok = False
else:
zero_ok = max_magnitude - ctx.prec < -zeroprec
if infprec is None:
inf_ok = False
else:
inf_ok = max_magnitude > infprec
if precision_ok and (not perturb) or ctx.isnan(cancellation):
break
elif precision_ok:
if perturbed_reference_value is None:
hextra += 20
perturbed_reference_value = sumvalue
continue
elif ctx.mag(sumvalue - perturbed_reference_value) <= \
ctx.mag(sumvalue) - orig:
break
elif zero_ok:
sumvalue = ctx.zero
break
elif inf_ok:
sumvalue = ctx.inf
break
elif 'hmag' in kwargs:
break
else:
hextra *= 2
perturbed_reference_value = sumvalue
# Increase precision
else:
increment = min(max(cancellation, orig//2), max(extraprec,orig))
ctx.prec += increment
if verbose:
print(" Must start over with increased precision")
continue
finally:
ctx.prec = orig
return +sumvalue
@defun
def hyper(ctx, a_s, b_s, z, **kwargs):
"""
Hypergeometric function, general case.
"""
z = ctx.convert(z)
p = len(a_s)
q = len(b_s)
a_s = [ctx._convert_param(a) for a in a_s]
b_s = [ctx._convert_param(b) for b in b_s]
# Reduce degree by eliminating common parameters
if kwargs.get('eliminate', True):
i = 0
while i < q and a_s:
b = b_s[i]
if b in a_s:
a_s.remove(b)
b_s.remove(b)
p -= 1
q -= 1
else:
i += 1
# Handle special cases
if p == 0:
if q == 1: return ctx._hyp0f1(b_s, z, **kwargs)
elif q == 0: return ctx.exp(z)
elif p == 1:
if q == 1: return ctx._hyp1f1(a_s, b_s, z, **kwargs)
elif q == 2: return ctx._hyp1f2(a_s, b_s, z, **kwargs)
elif q == 0: return ctx._hyp1f0(a_s[0][0], z)
elif p == 2:
if q == 1: return ctx._hyp2f1(a_s, b_s, z, **kwargs)
elif q == 2: return ctx._hyp2f2(a_s, b_s, z, **kwargs)
elif q == 3: return ctx._hyp2f3(a_s, b_s, z, **kwargs)
elif q == 0: return ctx._hyp2f0(a_s, b_s, z, **kwargs)
elif p == q+1:
return ctx._hypq1fq(p, q, a_s, b_s, z, **kwargs)
elif p > q+1 and not kwargs.get('force_series'):
return ctx._hyp_borel(p, q, a_s, b_s, z, **kwargs)
coeffs, types = zip(*(a_s+b_s))
return ctx.hypsum(p, q, types, coeffs, z, **kwargs)
@defun
def hyp0f1(ctx,b,z,**kwargs):
return ctx.hyper([],[b],z,**kwargs)
@defun
def hyp1f1(ctx,a,b,z,**kwargs):
return ctx.hyper([a],[b],z,**kwargs)
@defun
def hyp1f2(ctx,a1,b1,b2,z,**kwargs):
return ctx.hyper([a1],[b1,b2],z,**kwargs)
@defun
def hyp2f1(ctx,a,b,c,z,**kwargs):
return ctx.hyper([a,b],[c],z,**kwargs)
@defun
def hyp2f2(ctx,a1,a2,b1,b2,z,**kwargs):
return ctx.hyper([a1,a2],[b1,b2],z,**kwargs)
@defun
def hyp2f3(ctx,a1,a2,b1,b2,b3,z,**kwargs):
return ctx.hyper([a1,a2],[b1,b2,b3],z,**kwargs)
@defun
def hyp2f0(ctx,a,b,z,**kwargs):
return ctx.hyper([a,b],[],z,**kwargs)
@defun
def hyp3f2(ctx,a1,a2,a3,b1,b2,z,**kwargs):
return ctx.hyper([a1,a2,a3],[b1,b2],z,**kwargs)
@defun_wrapped
def _hyp1f0(ctx, a, z):
return (1-z) ** (-a)
@defun
def _hyp0f1(ctx, b_s, z, **kwargs):
(b, btype), = b_s
if z:
magz = ctx.mag(z)
else:
magz = 0
if magz >= 8 and not kwargs.get('force_series'):
try:
# http://functions.wolfram.com/HypergeometricFunctions/
# Hypergeometric0F1/06/02/03/0004/
# We don't need hypercomb because the only possible singularity
# occurs when the value is undefined. However, we should perhaps
# still check for cancellation...
# TODO: handle the all-real case more efficiently!
# TODO: figure out how much precision is needed (exponential growth)
orig = ctx.prec
try:
ctx.prec += 12 + magz//2
w = ctx.sqrt(-z)
jw = ctx.j*w
u = 1/(4*jw)
c = ctx.mpq_1_2 - b
E = ctx.exp(2*jw)
H1 = (-jw)**c/E*ctx.hyp2f0(b-ctx.mpq_1_2, ctx.mpq_3_2-b, -u,
force_series=True)
H2 = (jw)**c*E*ctx.hyp2f0(b-ctx.mpq_1_2, ctx.mpq_3_2-b, u,
force_series=True)
v = ctx.gamma(b)/(2*ctx.sqrt(ctx.pi))*(H1 + H2)
finally:
ctx.prec = orig
if ctx._is_real_type(b) and ctx._is_real_type(z):
v = ctx._re(v)
return +v
except ctx.NoConvergence:
pass
return ctx.hypsum(0, 1, (btype,), [b], z, **kwargs)
@defun
def _hyp1f1(ctx, a_s, b_s, z, **kwargs):
(a, atype), = a_s
(b, btype), = b_s
if not z:
return ctx.one+z
magz = ctx.mag(z)
if magz >= 7 and not (ctx.isint(a) and ctx.re(a) <= 0):
if ctx.isinf(z):
if ctx.sign(a) == ctx.sign(b) == ctx.sign(z) == 1:
return ctx.inf
return ctx.nan * z
try:
try:
ctx.prec += magz
sector = ctx._im(z) < 0
def h(a,b):
if sector:
E = ctx.expjpi(ctx.fneg(a, exact=True))
else:
E = ctx.expjpi(a)
rz = 1/z
T1 = ([E,z], [1,-a], [b], [b-a], [a, 1+a-b], [], -rz)
T2 = ([ctx.exp(z),z], [1,a-b], [b], [a], [b-a, 1-a], [], rz)
return T1, T2
v = ctx.hypercomb(h, [a,b], force_series=True)
if ctx._is_real_type(a) and ctx._is_real_type(b) and ctx._is_real_type(z):
v = ctx._re(v)
return +v
except ctx.NoConvergence:
pass
finally:
ctx.prec -= magz
v = ctx.hypsum(1, 1, (atype, btype), [a, b], z, **kwargs)
return v
def _hyp2f1_gosper(ctx,a,b,c,z,**kwargs):
# Use Gosper's recurrence
# See http://www.math.utexas.edu/pipermail/maxima/2006/000126.html
_a,_b,_c,_z = a, b, c, z
orig = ctx.prec
maxprec = kwargs.get('maxprec', 100*orig)
extra = 10
while 1:
ctx.prec = orig + extra
#a = ctx.convert(_a)
#b = ctx.convert(_b)
#c = ctx.convert(_c)
z = ctx.convert(_z)
d = ctx.mpf(0)
e = ctx.mpf(1)
f = ctx.mpf(0)
k = 0
# Common subexpression elimination, unfortunately making
# things a bit unreadable. The formula is quite messy to begin
# with, though...
abz = a*b*z
ch = c * ctx.mpq_1_2
c1h = (c+1) * ctx.mpq_1_2
nz = 1-z
g = z/nz
abg = a*b*g
cba = c-b-a
z2 = z-2
tol = -ctx.prec - 10
nstr = ctx.nstr
nprint = ctx.nprint
mag = ctx.mag
maxmag = ctx.ninf
while 1:
kch = k+ch
kakbz = (k+a)*(k+b)*z / (4*(k+1)*kch*(k+c1h))
d1 = kakbz*(e-(k+cba)*d*g)
e1 = kakbz*(d*abg+(k+c)*e)
ft = d*(k*(cba*z+k*z2-c)-abz)/(2*kch*nz)
f1 = f + e - ft
maxmag = max(maxmag, mag(f1))
if mag(f1-f) < tol:
break
d, e, f = d1, e1, f1
k += 1
cancellation = maxmag - mag(f1)
if cancellation < extra:
break
else:
extra += cancellation
if extra > maxprec:
raise ctx.NoConvergence
return f1
@defun
def _hyp2f1(ctx, a_s, b_s, z, **kwargs):
(a, atype), (b, btype) = a_s
(c, ctype), = b_s
if z == 1:
# TODO: the following logic can be simplified
convergent = ctx.re(c-a-b) > 0
finite = (ctx.isint(a) and a <= 0) or (ctx.isint(b) and b <= 0)
zerodiv = ctx.isint(c) and c <= 0 and not \
((ctx.isint(a) and c <= a <= 0) or (ctx.isint(b) and c <= b <= 0))
#print "bz", a, b, c, z, convergent, finite, zerodiv
# Gauss's theorem gives the value if convergent
if (convergent or finite) and not zerodiv:
return ctx.gammaprod([c, c-a-b], [c-a, c-b], _infsign=True)
# Otherwise, there is a pole and we take the
# sign to be that when approaching from below
# XXX: this evaluation is not necessarily correct in all cases
return ctx.hyp2f1(a,b,c,1-ctx.eps*2) * ctx.inf
# Equal to 1 (first term), unless there is a subsequent
# division by zero
if not z:
# Division by zero but power of z is higher than
# first order so cancels
if c or a == 0 or b == 0:
return 1+z
# Indeterminate
return ctx.nan
# Hit zero denominator unless numerator goes to 0 first
if ctx.isint(c) and c <= 0:
if (ctx.isint(a) and c <= a <= 0) or \
(ctx.isint(b) and c <= b <= 0):
pass
else:
# Pole in series
return ctx.inf
absz = abs(z)
# Fast case: standard series converges rapidly,
# possibly in finitely many terms
if absz <= 0.8 or (ctx.isint(a) and a <= 0 and a >= -1000) or \
(ctx.isint(b) and b <= 0 and b >= -1000):
return ctx.hypsum(2, 1, (atype, btype, ctype), [a, b, c], z, **kwargs)
orig = ctx.prec
try:
ctx.prec += 10
# Use 1/z transformation
if absz >= 1.3:
def h(a,b):
t = ctx.mpq_1-c; ab = a-b; rz = 1/z
T1 = ([-z],[-a], [c,-ab],[b,c-a], [a,t+a],[ctx.mpq_1+ab], rz)
T2 = ([-z],[-b], [c,ab],[a,c-b], [b,t+b],[ctx.mpq_1-ab], rz)
return T1, T2
v = ctx.hypercomb(h, [a,b], **kwargs)
# Use 1-z transformation
elif abs(1-z) <= 0.75:
def h(a,b):
t = c-a-b; ca = c-a; cb = c-b; rz = 1-z
T1 = [], [], [c,t], [ca,cb], [a,b], [1-t], rz
T2 = [rz], [t], [c,a+b-c], [a,b], [ca,cb], [1+t], rz
return T1, T2
v = ctx.hypercomb(h, [a,b], **kwargs)
# Use z/(z-1) transformation
elif abs(z/(z-1)) <= 0.75:
v = ctx.hyp2f1(a, c-b, c, z/(z-1)) / (1-z)**a
# Remaining part of unit circle
else:
v = _hyp2f1_gosper(ctx,a,b,c,z,**kwargs)
finally:
ctx.prec = orig
return +v
@defun
def _hypq1fq(ctx, p, q, a_s, b_s, z, **kwargs):
r"""
Evaluates 3F2, 4F3, 5F4, ...
"""
a_s, a_types = zip(*a_s)
b_s, b_types = zip(*b_s)
a_s = list(a_s)
b_s = list(b_s)
absz = abs(z)
ispoly = False
for a in a_s:
if ctx.isint(a) and a <= 0:
ispoly = True
break
# Direct summation
if absz < 1 or ispoly:
try:
return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs)
except ctx.NoConvergence:
if absz > 1.1 or ispoly:
raise
# Use expansion at |z-1| -> 0.
# Reference: Wolfgang Buhring, "Generalized Hypergeometric Functions at
# Unit Argument", Proc. Amer. Math. Soc., Vol. 114, No. 1 (Jan. 1992),
# pp.145-153
# The current implementation has several problems:
# 1. We only implement it for 3F2. The expansion coefficients are
# given by extremely messy nested sums in the higher degree cases
# (see reference). Is efficient sequential generation of the coefficients
# possible in the > 3F2 case?
# 2. Although the series converges, it may do so slowly, so we need
# convergence acceleration. The acceleration implemented by
# nsum does not always help, so results returned are sometimes
# inaccurate! Can we do better?
# 3. We should check conditions for convergence, and possibly
# do a better job of cancelling out gamma poles if possible.
if z == 1:
# XXX: should also check for division by zero in the
# denominator of the series (cf. hyp2f1)
S = ctx.re(sum(b_s)-sum(a_s))
if S <= 0:
#return ctx.hyper(a_s, b_s, 1-ctx.eps*2, **kwargs) * ctx.inf
return ctx.hyper(a_s, b_s, 0.9, **kwargs) * ctx.inf
if (p,q) == (3,2) and abs(z-1) < 0.05: # and kwargs.get('sum1')
#print "Using alternate summation (experimental)"
a1,a2,a3 = a_s
b1,b2 = b_s
u = b1+b2-a3
initial = ctx.gammaprod([b2-a3,b1-a3,a1,a2],[b2-a3,b1-a3,1,u])
def term(k, _cache={0:initial}):
u = b1+b2-a3+k
if k in _cache:
t = _cache[k]
else:
t = _cache[k-1]
t *= (b1+k-a3-1)*(b2+k-a3-1)
t /= k*(u-1)
_cache[k] = t
return t * ctx.hyp2f1(a1,a2,u,z)
try:
S = ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'),
strict=kwargs.get('strict', True))
return S * ctx.gammaprod([b1,b2],[a1,a2,a3])
except ctx.NoConvergence:
pass
# Try to use convergence acceleration on and close to the unit circle.
# Problem: the convergence acceleration degenerates as |z-1| -> 0,
# except for special cases. Everywhere else, the Shanks transformation
# is very efficient.
if absz < 1.1 and ctx._re(z) <= 1:
def term(kk, _cache={0:ctx.one}):
k = int(kk)
if k != kk:
t = z ** ctx.mpf(kk) / ctx.fac(kk)
for a in a_s: t *= ctx.rf(a,kk)
for b in b_s: t /= ctx.rf(b,kk)
return t
if k in _cache:
return _cache[k]
t = term(k-1)
m = k-1
for j in xrange(p): t *= (a_s[j]+m)
for j in xrange(q): t /= (b_s[j]+m)
t *= z
t /= k
_cache[k] = t
return t
sum_method = kwargs.get('sum_method', 'r+s+e')
try:
return ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'),
strict=kwargs.get('strict', True),
method=sum_method.replace('e',''))
except ctx.NoConvergence:
if 'e' not in sum_method:
raise
pass
if kwargs.get('verbose'):
print("Attempting Euler-Maclaurin summation")
"""
Somewhat slower version (one diffs_exp for each factor).
However, this would be faster with fast direct derivatives
of the gamma function.
def power_diffs(k0):
r = 0
l = ctx.log(z)
while 1:
yield z**ctx.mpf(k0) * l**r
r += 1
def loggamma_diffs(x, reciprocal=False):
sign = (-1) ** reciprocal
yield sign * ctx.loggamma(x)
i = 0
while 1:
yield sign * ctx.psi(i,x)
i += 1
def hyper_diffs(k0):
b2 = b_s + [1]
A = [ctx.diffs_exp(loggamma_diffs(a+k0)) for a in a_s]
B = [ctx.diffs_exp(loggamma_diffs(b+k0,True)) for b in b2]
Z = [power_diffs(k0)]
C = ctx.gammaprod([b for b in b2], [a for a in a_s])
for d in ctx.diffs_prod(A + B + Z):
v = C * d
yield v
"""
def log_diffs(k0):
b2 = b_s + [1]
yield sum(ctx.loggamma(a+k0) for a in a_s) - \
sum(ctx.loggamma(b+k0) for b in b2) + k0*ctx.log(z)
i = 0
while 1:
v = sum(ctx.psi(i,a+k0) for a in a_s) - \
sum(ctx.psi(i,b+k0) for b in b2)
if i == 0:
v += ctx.log(z)
yield v
i += 1
def hyper_diffs(k0):
C = ctx.gammaprod([b for b in b_s], [a for a in a_s])
for d in ctx.diffs_exp(log_diffs(k0)):
v = C * d
yield v
tol = ctx.eps / 1024
prec = ctx.prec
try:
trunc = 50 * ctx.dps
ctx.prec += 20
for i in xrange(5):
head = ctx.fsum(term(k) for k in xrange(trunc))
tail, err = ctx.sumem(term, [trunc, ctx.inf], tol=tol,
adiffs=hyper_diffs(trunc),
verbose=kwargs.get('verbose'),
error=True,
_fast_abort=True)
if err < tol:
v = head + tail
break
trunc *= 2
# Need to increase precision because calculation of
# derivatives may be inaccurate
ctx.prec += ctx.prec//2
if i == 4:
raise ctx.NoConvergence(\
"Euler-Maclaurin summation did not converge")
finally:
ctx.prec = prec
return +v
# Use 1/z transformation
# http://functions.wolfram.com/HypergeometricFunctions/
# HypergeometricPFQ/06/01/05/02/0004/
def h(*args):
a_s = list(args[:p])
b_s = list(args[p:])
Ts = []
recz = ctx.one/z
negz = ctx.fneg(z, exact=True)
for k in range(q+1):
ak = a_s[k]
C = [negz]
Cp = [-ak]
Gn = b_s + [ak] + [a_s[j]-ak for j in range(q+1) if j != k]
Gd = a_s + [b_s[j]-ak for j in range(q)]
Fn = [ak] + [ak-b_s[j]+1 for j in range(q)]
Fd = [1-a_s[j]+ak for j in range(q+1) if j != k]
Ts.append((C, Cp, Gn, Gd, Fn, Fd, recz))
return Ts
return ctx.hypercomb(h, a_s+b_s, **kwargs)
@defun
def _hyp_borel(ctx, p, q, a_s, b_s, z, **kwargs):
if a_s:
a_s, a_types = zip(*a_s)
a_s = list(a_s)
else:
a_s, a_types = [], ()
if b_s:
b_s, b_types = zip(*b_s)
b_s = list(b_s)
else:
b_s, b_types = [], ()
kwargs['maxterms'] = kwargs.get('maxterms', ctx.prec)
try:
return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs)
except ctx.NoConvergence:
pass
prec = ctx.prec
try:
tol = kwargs.get('asymp_tol', ctx.eps/4)
ctx.prec += 10
        # hypsum has a conservative tolerance, so we try again:
def term(k, cache={0:ctx.one}):
if k in cache:
return cache[k]
t = term(k-1)
for a in a_s: t *= (a+(k-1))
for b in b_s: t /= (b+(k-1))
t *= z
t /= k
cache[k] = t
return t
s = ctx.one
for k in xrange(1, ctx.prec):
t = term(k)
s += t
if abs(t) <= tol:
return s
finally:
ctx.prec = prec
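    # Borel-type regularization of the divergent series: integrate
    # exp(-t) * hyper(a_s, b_s + [1], t*z) over a contour selected
    # according to arg(z), accepting the result only if the quadrature
    # error estimate is small relative to the integral.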
if p <= q+3:
contour = kwargs.get('contour')
if not contour:
if ctx.arg(z) < 0.25:
u = z / max(1, abs(z))
if ctx.arg(z) >= 0:
contour = [0, 2j, (2j+2)/u, 2/u, ctx.inf]
else:
contour = [0, -2j, (-2j+2)/u, 2/u, ctx.inf]
#contour = [0, 2j/z, 2/z, ctx.inf]
#contour = [0, 2j, 2/z, ctx.inf]
#contour = [0, 2j, ctx.inf]
else:
contour = [0, ctx.inf]
quad_kwargs = kwargs.get('quad_kwargs', {})
def g(t):
return ctx.exp(-t)*ctx.hyper(a_s, b_s+[1], t*z)
I, err = ctx.quad(g, contour, error=True, **quad_kwargs)
if err <= abs(I)*ctx.eps*8:
return I
raise ctx.NoConvergence
@defun
def _hyp2f2(ctx, a_s, b_s, z, **kwargs):
(a1, a1type), (a2, a2type) = a_s
(b1, b1type), (b2, b2type) = b_s
absz = abs(z)
magz = ctx.mag(z)
orig = ctx.prec
# Asymptotic expansion is ~ exp(z)
asymp_extraprec = magz
# Asymptotic series is in terms of 3F1
can_use_asymptotic = (not kwargs.get('force_series')) and \
(ctx.mag(absz) > 3)
# TODO: much of the following could be shared with 2F3 instead of
# copypasted
if can_use_asymptotic:
#print "using asymp"
try:
try:
ctx.prec += asymp_extraprec
# http://functions.wolfram.com/HypergeometricFunctions/
# Hypergeometric2F2/06/02/02/0002/
def h(a1,a2,b1,b2):
X = a1+a2-b1-b2
A2 = a1+a2
B2 = b1+b2
c = {}
c[0] = ctx.one
c[1] = (A2-1)*X+b1*b2-a1*a2
s1 = 0
k = 0
tprev = 0
while 1:
if k not in c:
uu1 = 1-B2+2*a1+a1**2+2*a2+a2**2-A2*B2+a1*a2+b1*b2+(2*B2-3*(A2+1))*k+2*k**2
uu2 = (k-A2+b1-1)*(k-A2+b2-1)*(k-X-2)
c[k] = ctx.one/k * (uu1*c[k-1]-uu2*c[k-2])
t1 = c[k] * z**(-k)
if abs(t1) < 0.1*ctx.eps:
#print "Convergence :)"
break
# Quit if the series doesn't converge quickly enough
if k > 5 and abs(tprev) / abs(t1) < 1.5:
#print "No convergence :("
raise ctx.NoConvergence
s1 += t1
tprev = t1
k += 1
S = ctx.exp(z)*s1
T1 = [z,S], [X,1], [b1,b2],[a1,a2],[],[],0
T2 = [-z],[-a1],[b1,b2,a2-a1],[a2,b1-a1,b2-a1],[a1,a1-b1+1,a1-b2+1],[a1-a2+1],-1/z
T3 = [-z],[-a2],[b1,b2,a1-a2],[a1,b1-a2,b2-a2],[a2,a2-b1+1,a2-b2+1],[-a1+a2+1],-1/z
return T1, T2, T3
v = ctx.hypercomb(h, [a1,a2,b1,b2], force_series=True, maxterms=4*ctx.prec)
if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,z]) == 5:
v = ctx.re(v)
return v
except ctx.NoConvergence:
pass
finally:
ctx.prec = orig
return ctx.hypsum(2, 2, (a1type, a2type, b1type, b2type), [a1, a2, b1, b2], z, **kwargs)
@defun
def _hyp1f2(ctx, a_s, b_s, z, **kwargs):
(a1, a1type), = a_s
(b1, b1type), (b2, b2type) = b_s
absz = abs(z)
magz = ctx.mag(z)
orig = ctx.prec
# Asymptotic expansion is ~ exp(sqrt(z))
asymp_extraprec = z and magz//2
# Asymptotic series is in terms of 3F0
can_use_asymptotic = (not kwargs.get('force_series')) and \
(ctx.mag(absz) > 19) and \
(ctx.sqrt(absz) > 1.5*orig) #and \
#ctx._hyp_check_convergence([a1, a1-b1+1, a1-b2+1], [],
# 1/absz, orig+40+asymp_extraprec)
# TODO: much of the following could be shared with 2F3 instead of
# copypasted
if can_use_asymptotic:
#print "using asymp"
try:
try:
ctx.prec += asymp_extraprec
# http://functions.wolfram.com/HypergeometricFunctions/
# Hypergeometric1F2/06/02/03/
def h(a1,b1,b2):
X = ctx.mpq_1_2*(a1-b1-b2+ctx.mpq_1_2)
c = {}
c[0] = ctx.one
c[1] = 2*(ctx.mpq_1_4*(3*a1+b1+b2-2)*(a1-b1-b2)+b1*b2-ctx.mpq_3_16)
c[2] = 2*(b1*b2+ctx.mpq_1_4*(a1-b1-b2)*(3*a1+b1+b2-2)-ctx.mpq_3_16)**2+\
ctx.mpq_1_16*(-16*(2*a1-3)*b1*b2 + \
4*(a1-b1-b2)*(-8*a1**2+11*a1+b1+b2-2)-3)
s1 = 0
s2 = 0
k = 0
tprev = 0
while 1:
if k not in c:
uu1 = (3*k**2+(-6*a1+2*b1+2*b2-4)*k + 3*a1**2 - \
(b1-b2)**2 - 2*a1*(b1+b2-2) + ctx.mpq_1_4)
uu2 = (k-a1+b1-b2-ctx.mpq_1_2)*(k-a1-b1+b2-ctx.mpq_1_2)*\
(k-a1+b1+b2-ctx.mpq_5_2)
c[k] = ctx.one/(2*k)*(uu1*c[k-1]-uu2*c[k-2])
w = c[k] * (-z)**(-0.5*k)
t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w
t2 = ctx.j**k * ctx.mpf(2)**(-k) * w
if abs(t1) < 0.1*ctx.eps:
#print "Convergence :)"
break
# Quit if the series doesn't converge quickly enough
if k > 5 and abs(tprev) / abs(t1) < 1.5:
#print "No convergence :("
raise ctx.NoConvergence
s1 += t1
s2 += t2
tprev = t1
k += 1
S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \
ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2
T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2], [a1],\
[], [], 0
T2 = [-z], [-a1], [b1,b2],[b1-a1,b2-a1], \
[a1,a1-b1+1,a1-b2+1], [], 1/z
return T1, T2
v = ctx.hypercomb(h, [a1,b1,b2], force_series=True, maxterms=4*ctx.prec)
if sum(ctx._is_real_type(u) for u in [a1,b1,b2,z]) == 4:
v = ctx.re(v)
return v
except ctx.NoConvergence:
pass
finally:
ctx.prec = orig
#print "not using asymp"
return ctx.hypsum(1, 2, (a1type, b1type, b2type), [a1, b1, b2], z, **kwargs)
@defun
def _hyp2f3(ctx, a_s, b_s, z, **kwargs):
(a1, a1type), (a2, a2type) = a_s
(b1, b1type), (b2, b2type), (b3, b3type) = b_s
absz = abs(z)
magz = ctx.mag(z)
# Asymptotic expansion is ~ exp(sqrt(z))
asymp_extraprec = z and magz//2
orig = ctx.prec
# Asymptotic series is in terms of 4F1
# The square root below empirically provides a plausible criterion
# for the leading series to converge
can_use_asymptotic = (not kwargs.get('force_series')) and \
(ctx.mag(absz) > 19) and (ctx.sqrt(absz) > 1.5*orig)
if can_use_asymptotic:
#print "using asymp"
try:
try:
ctx.prec += asymp_extraprec
# http://functions.wolfram.com/HypergeometricFunctions/
# Hypergeometric2F3/06/02/03/01/0002/
def h(a1,a2,b1,b2,b3):
X = ctx.mpq_1_2*(a1+a2-b1-b2-b3+ctx.mpq_1_2)
A2 = a1+a2
B3 = b1+b2+b3
A = a1*a2
B = b1*b2+b3*b2+b1*b3
R = b1*b2*b3
c = {}
c[0] = ctx.one
c[1] = 2*(B - A + ctx.mpq_1_4*(3*A2+B3-2)*(A2-B3) - ctx.mpq_3_16)
c[2] = ctx.mpq_1_2*c[1]**2 + ctx.mpq_1_16*(-16*(2*A2-3)*(B-A) + 32*R +\
4*(-8*A2**2 + 11*A2 + 8*A + B3 - 2)*(A2-B3)-3)
s1 = 0
s2 = 0
k = 0
tprev = 0
while 1:
if k not in c:
uu1 = (k-2*X-3)*(k-2*X-2*b1-1)*(k-2*X-2*b2-1)*\
(k-2*X-2*b3-1)
uu2 = (4*(k-1)**3 - 6*(4*X+B3)*(k-1)**2 + \
2*(24*X**2+12*B3*X+4*B+B3-1)*(k-1) - 32*X**3 - \
24*B3*X**2 - 4*B - 8*R - 4*(4*B+B3-1)*X + 2*B3-1)
uu3 = (5*(k-1)**2+2*(-10*X+A2-3*B3+3)*(k-1)+2*c[1])
c[k] = ctx.one/(2*k)*(uu1*c[k-3]-uu2*c[k-2]+uu3*c[k-1])
w = c[k] * ctx.power(-z, -0.5*k)
t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w
t2 = ctx.j**k * ctx.mpf(2)**(-k) * w
if abs(t1) < 0.1*ctx.eps:
break
# Quit if the series doesn't converge quickly enough
if k > 5 and abs(tprev) / abs(t1) < 1.5:
raise ctx.NoConvergence
s1 += t1
s2 += t2
tprev = t1
k += 1
S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \
ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2
T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2, b3], [a1, a2],\
[], [], 0
T2 = [-z], [-a1], [b1,b2,b3,a2-a1],[a2,b1-a1,b2-a1,b3-a1], \
[a1,a1-b1+1,a1-b2+1,a1-b3+1], [a1-a2+1], 1/z
T3 = [-z], [-a2], [b1,b2,b3,a1-a2],[a1,b1-a2,b2-a2,b3-a2], \
[a2,a2-b1+1,a2-b2+1,a2-b3+1],[-a1+a2+1], 1/z
return T1, T2, T3
v = ctx.hypercomb(h, [a1,a2,b1,b2,b3], force_series=True, maxterms=4*ctx.prec)
if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,b3,z]) == 6:
v = ctx.re(v)
return v
except ctx.NoConvergence:
pass
finally:
ctx.prec = orig
return ctx.hypsum(2, 3, (a1type, a2type, b1type, b2type, b3type), [a1, a2, b1, b2, b3], z, **kwargs)
@defun
def _hyp2f0(ctx, a_s, b_s, z, **kwargs):
(a, atype), (b, btype) = a_s
# We want to try aggressively to use the asymptotic expansion,
# and fall back only when absolutely necessary
try:
kwargsb = kwargs.copy()
kwargsb['maxterms'] = kwargsb.get('maxterms', ctx.prec)
return ctx.hypsum(2, 0, (atype,btype), [a,b], z, **kwargsb)
except ctx.NoConvergence:
if kwargs.get('force_series'):
raise
pass
def h(a, b):
w = ctx.sinpi(b)
rz = -1/z
T1 = ([ctx.pi,w,rz],[1,-1,a],[],[a-b+1,b],[a],[b],rz)
T2 = ([-ctx.pi,w,rz],[1,-1,1+a-b],[],[a,2-b],[a-b+1],[2-b],rz)
return T1, T2
return ctx.hypercomb(h, [a, 1+a-b], **kwargs)
@defun
def meijerg(ctx, a_s, b_s, z, r=1, series=None, **kwargs):
an, ap = a_s
bm, bq = b_s
n = len(an)
p = n + len(ap)
m = len(bm)
q = m + len(bq)
a = an+ap
b = bm+bq
a = [ctx.convert(_) for _ in a]
b = [ctx.convert(_) for _ in b]
z = ctx.convert(z)
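    # Series selection: series 1 sums residues over the m poles attached
    # to the b-parameters (an expansion in powers of z); series 2 sums
    # over the n poles attached to the a-parameters (an expansion in 1/z).
    # When p == q, the choice depends on |z| (heuristic below).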
if series is None:
if p < q: series = 1
if p > q: series = 2
if p == q:
if m+n == p and abs(z) > 1:
series = 2
else:
series = 1
if kwargs.get('verbose'):
print("Meijer G m,n,p,q,series =", m,n,p,q,series)
if series == 1:
def h(*args):
a = args[:p]
b = args[p:]
terms = []
for k in range(m):
bases = [z]
expts = [b[k]/r]
gn = [b[j]-b[k] for j in range(m) if j != k]
gn += [1-a[j]+b[k] for j in range(n)]
gd = [a[j]-b[k] for j in range(n,p)]
gd += [1-b[j]+b[k] for j in range(m,q)]
hn = [1-a[j]+b[k] for j in range(p)]
hd = [1-b[j]+b[k] for j in range(q) if j != k]
hz = (-ctx.one)**(p-m-n) * z**(ctx.one/r)
terms.append((bases, expts, gn, gd, hn, hd, hz))
return terms
else:
def h(*args):
a = args[:p]
b = args[p:]
terms = []
for k in range(n):
bases = [z]
if r == 1:
expts = [a[k]-1]
else:
expts = [(a[k]-1)/ctx.convert(r)]
gn = [a[k]-a[j] for j in range(n) if j != k]
gn += [1-a[k]+b[j] for j in range(m)]
gd = [a[k]-b[j] for j in range(m,q)]
gd += [1-a[k]+a[j] for j in range(n,p)]
hn = [1-a[k]+b[j] for j in range(q)]
hd = [1+a[j]-a[k] for j in range(p) if j != k]
hz = (-ctx.one)**(q-m-n) / z**(ctx.one/r)
terms.append((bases, expts, gn, gd, hn, hd, hz))
return terms
return ctx.hypercomb(h, a+b, **kwargs)
@defun_wrapped
def appellf1(ctx,a,b1,b2,c,x,y,**kwargs):
# Assume x smaller
# We will use x for the outer loop
if abs(x) > abs(y):
x, y = y, x
b1, b2 = b2, b1
def ok(x):
return abs(x) < 0.99
# Finite cases
if ctx.isnpint(a):
pass
elif ctx.isnpint(b1):
pass
elif ctx.isnpint(b2):
x, y, b1, b2 = y, x, b2, b1
else:
#print x, y
# Note: ok if |y| > 1, because
# 2F1 implements analytic continuation
if not ok(x):
u1 = (x-y)/(x-1)
if not ok(u1):
raise ValueError("Analytic continuation not implemented")
#print "Using analytic continuation"
return (1-x)**(-b1)*(1-y)**(c-a-b2)*\
ctx.appellf1(c-a,b1,c-b1-b2,c,u1,y,**kwargs)
return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, {'m+n':[c]}, x,y, **kwargs)
@defun
def appellf2(ctx,a,b1,b2,c1,c2,x,y,**kwargs):
# TODO: continuation
return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]},
{'m':[c1],'n':[c2]}, x,y, **kwargs)
@defun
def appellf3(ctx,a1,a2,b1,b2,c,x,y,**kwargs):
outer_polynomial = ctx.isnpint(a1) or ctx.isnpint(b1)
inner_polynomial = ctx.isnpint(a2) or ctx.isnpint(b2)
if not outer_polynomial:
if inner_polynomial or abs(x) > abs(y):
x, y = y, x
a1,a2,b1,b2 = a2,a1,b2,b1
return ctx.hyper2d({'m':[a1,b1],'n':[a2,b2]}, {'m+n':[c]},x,y,**kwargs)
@defun
def appellf4(ctx,a,b,c1,c2,x,y,**kwargs):
# TODO: continuation
return ctx.hyper2d({'m+n':[a,b]}, {'m':[c1],'n':[c2]},x,y,**kwargs)
@defun
def hyper2d(ctx, a, b, x, y, **kwargs):
r"""
Sums the generalized 2D hypergeometric series
.. math ::
\sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{P((a),m,n)}{Q((b),m,n)}
\frac{x^m y^n} {m! n!}
where `(a) = (a_1,\ldots,a_r)`, `(b) = (b_1,\ldots,b_s)` and where
`P` and `Q` are products of rising factorials such as `(a_j)_n` or
`(a_j)_{m+n}`. `P` and `Q` are specified in the form of dicts, with
the `m` and `n` dependence as keys and parameter lists as values.
The supported rising factorials are given in the following table
(note that only a few are supported in `Q`):
+------------+-------------------+--------+
| Key | Rising factorial | `Q` |
+============+===================+========+
| ``'m'`` | `(a_j)_m` | Yes |
+------------+-------------------+--------+
| ``'n'`` | `(a_j)_n` | Yes |
+------------+-------------------+--------+
| ``'m+n'`` | `(a_j)_{m+n}` | Yes |
+------------+-------------------+--------+
| ``'m-n'`` | `(a_j)_{m-n}` | No |
+------------+-------------------+--------+
| ``'n-m'`` | `(a_j)_{n-m}` | No |
+------------+-------------------+--------+
| ``'2m+n'`` | `(a_j)_{2m+n}` | No |
+------------+-------------------+--------+
| ``'2m-n'`` | `(a_j)_{2m-n}` | No |
+------------+-------------------+--------+
| ``'2n-m'`` | `(a_j)_{2n-m}` | No |
+------------+-------------------+--------+
For example, the Appell F1 and F4 functions
.. math ::
F_1 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b)_m (c)_n}{(d)_{m+n}}
\frac{x^m y^n}{m! n!}
F_4 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b)_{m+n}}{(c)_m (d)_{n}}
\frac{x^m y^n}{m! n!}
can be represented respectively as
``hyper2d({'m+n':[a], 'm':[b], 'n':[c]}, {'m+n':[d]}, x, y)``
``hyper2d({'m+n':[a,b]}, {'m':[c], 'n':[d]}, x, y)``
More generally, :func:`~mpmath.hyper2d` can evaluate any of the 34 distinct
convergent second-order (generalized Gaussian) hypergeometric
series enumerated by Horn, as well as the Kampe de Feriet
function.
The series is computed by rewriting it so that the inner
series (i.e. the series containing `n` and `y`) has the form of an
ordinary generalized hypergeometric series and thereby can be
evaluated efficiently using :func:`~mpmath.hyper`. If possible,
manually swapping `x` and `y` and the corresponding parameters
can sometimes give better results.
**Examples**
Two separable cases: a product of two geometric series, and a
product of two Gaussian hypergeometric functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> x, y = mpf(0.25), mpf(0.5)
>>> hyper2d({'m':1,'n':1}, {}, x,y)
2.666666666666666666666667
>>> 1/(1-x)/(1-y)
2.666666666666666666666667
>>> hyper2d({'m':[1,2],'n':[3,4]}, {'m':[5],'n':[6]}, x,y)
4.164358531238938319669856
>>> hyp2f1(1,2,5,x)*hyp2f1(3,4,6,y)
4.164358531238938319669856
Some more series that can be done in closed form::
>>> hyper2d({'m':1,'n':1},{'m+n':1},x,y)
2.013417124712514809623881
>>> (exp(x)*x-exp(y)*y)/(x-y)
2.013417124712514809623881
Six of the 34 Horn functions, G1-G3 and H1-H3::
>>> from mpmath import *
>>> mp.dps = 10; mp.pretty = True
>>> x, y = 0.0625, 0.125
>>> a1,a2,b1,b2,c1,c2,d = 1.1,-1.2,-1.3,-1.4,1.5,-1.6,1.7
>>> hyper2d({'m+n':a1,'n-m':b1,'m-n':b2},{},x,y) # G1
1.139090746
>>> nsum(lambda m,n: rf(a1,m+n)*rf(b1,n-m)*rf(b2,m-n)*\
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
1.139090746
>>> hyper2d({'m':a1,'n':a2,'n-m':b1,'m-n':b2},{},x,y) # G2
0.9503682696
>>> nsum(lambda m,n: rf(a1,m)*rf(a2,n)*rf(b1,n-m)*rf(b2,m-n)*\
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
0.9503682696
>>> hyper2d({'2n-m':a1,'2m-n':a2},{},x,y) # G3
1.029372029
>>> nsum(lambda m,n: rf(a1,2*n-m)*rf(a2,2*m-n)*\
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
1.029372029
>>> hyper2d({'m-n':a1,'m+n':b1,'n':c1},{'m':d},x,y) # H1
-1.605331256
>>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m+n)*rf(c1,n)/rf(d,m)*\
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
-1.605331256
>>> hyper2d({'m-n':a1,'m':b1,'n':[c1,c2]},{'m':d},x,y) # H2
-2.35405404
>>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m)*rf(c1,n)*rf(c2,n)/rf(d,m)*\
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
-2.35405404
>>> hyper2d({'2m+n':a1,'n':b1},{'m+n':c1},x,y) # H3
0.974479074
>>> nsum(lambda m,n: rf(a1,2*m+n)*rf(b1,n)/rf(c1,m+n)*\
... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf])
0.974479074
**References**
1. [SrivastavaKarlsson]_
2. [Weisstein]_ http://mathworld.wolfram.com/HornFunction.html
3. [Weisstein]_ http://mathworld.wolfram.com/AppellHypergeometricFunction.html
"""
x = ctx.convert(x)
y = ctx.convert(y)
def parse(dct, key):
args = dct.pop(key, [])
try:
args = list(args)
except TypeError:
args = [args]
return [ctx.convert(arg) for arg in args]
a_s = dict(a)
b_s = dict(b)
    a_m = parse(a_s, 'm')
    a_n = parse(a_s, 'n')
    a_m_add_n = parse(a_s, 'm+n')
    a_m_sub_n = parse(a_s, 'm-n')
    a_n_sub_m = parse(a_s, 'n-m')
    a_2m_add_n = parse(a_s, '2m+n')
    a_2m_sub_n = parse(a_s, '2m-n')
    a_2n_sub_m = parse(a_s, '2n-m')
    b_m = parse(b_s, 'm')
    b_n = parse(b_s, 'n')
    b_m_add_n = parse(b_s, 'm+n')
    # Parse from the copies a_s, b_s so the caller's dicts are not mutated;
    # any keys left over are unsupported.
    if a_s: raise ValueError("unsupported key: %r" % list(a_s.keys())[0])
    if b_s: raise ValueError("unsupported key: %r" % list(b_s.keys())[0])
s = 0
outer = ctx.one
m = ctx.mpf(0)
ok_count = 0
prec = ctx.prec
maxterms = kwargs.get('maxterms', 20*prec)
try:
ctx.prec += 10
tol = +ctx.eps
while 1:
inner_sign = 1
outer_sign = 1
inner_a = list(a_n)
inner_b = list(b_n)
outer_a = [a+m for a in a_m]
outer_b = [b+m for b in b_m]
# (a)_{m+n} = (a)_m (a+m)_n
for a in a_m_add_n:
a = a+m
inner_a.append(a)
outer_a.append(a)
# (b)_{m+n} = (b)_m (b+m)_n
for b in b_m_add_n:
b = b+m
inner_b.append(b)
outer_b.append(b)
# (a)_{n-m} = (a-m)_n / (a-m)_m
for a in a_n_sub_m:
inner_a.append(a-m)
outer_b.append(a-m-1)
# (a)_{m-n} = (-1)^(m+n) (1-a-m)_m / (1-a-m)_n
for a in a_m_sub_n:
inner_sign *= (-1)
outer_sign *= (-1)**(m)
inner_b.append(1-a-m)
outer_a.append(-a-m)
# (a)_{2m+n} = (a)_{2m} (a+2m)_n
for a in a_2m_add_n:
inner_a.append(a+2*m)
outer_a.append((a+2*m)*(1+a+2*m))
# (a)_{2m-n} = (-1)^(2m+n) (1-a-2m)_{2m} / (1-a-2m)_n
for a in a_2m_sub_n:
inner_sign *= (-1)
inner_b.append(1-a-2*m)
outer_a.append((a+2*m)*(1+a+2*m))
# (a)_{2n-m} = 4^n ((a-m)/2)_n ((a-m+1)/2)_n / (a-m)_m
for a in a_2n_sub_m:
inner_sign *= 4
inner_a.append(0.5*(a-m))
inner_a.append(0.5*(a-m+1))
outer_b.append(a-m-1)
inner = ctx.hyper(inner_a, inner_b, inner_sign*y,
zeroprec=ctx.prec, **kwargs)
term = outer * inner * outer_sign
if abs(term) < tol:
ok_count += 1
else:
ok_count = 0
if ok_count >= 3 or not outer:
break
s += term
for a in outer_a: outer *= a
for b in outer_b: outer /= b
m += 1
outer = outer * x / m
if m > maxterms:
raise ctx.NoConvergence("maxterms exceeded in hyper2d")
finally:
ctx.prec = prec
return +s
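# Added illustrative check (not part of the original source): the main loop
# above relies on splittings of rising factorials such as
# (a)_{m+n} = (a)_m * (a+m)_n, which can be verified directly.
def _rf_split_sketch(a=1.25, m=3, n=4):
    from mpmath import rf
    return rf(a, m+n) - rf(a, m)*rf(a+m, n)  # expect 0 up to rounding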
"""
@defun
def kampe_de_feriet(ctx,a,b,c,d,e,f,x,y,**kwargs):
return ctx.hyper2d({'m+n':a,'m':b,'n':c},
{'m+n':d,'m':e,'n':f}, x,y, **kwargs)
"""
@defun
def bihyper(ctx, a_s, b_s, z, **kwargs):
r"""
Evaluates the bilateral hypergeometric series
.. math ::
        \,_AH_B(a_1, \ldots, a_A; b_1, \ldots, b_B; z) =
\sum_{n=-\infty}^{\infty}
\frac{(a_1)_n \ldots (a_A)_n}
{(b_1)_n \ldots (b_B)_n} \, z^n
where, for direct convergence, `A = B` and `|z| = 1`, although a
regularized sum exists more generally by considering the
bilateral series as a sum of two ordinary hypergeometric
functions. In order for the series to make sense, none of the
parameters may be integers.
**Examples**
The value of `\,_2H_2` at `z = 1` is given by Dougall's formula::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a,b,c,d = 0.5, 1.5, 2.25, 3.25
>>> bihyper([a,b],[c,d],1)
-14.49118026212345786148847
>>> gammaprod([c,d,1-a,1-b,c+d-a-b-1],[c-a,d-a,c-b,d-b])
-14.49118026212345786148847
The regularized function `\,_1H_0` can be expressed as the
sum of one `\,_2F_0` function and one `\,_1F_1` function::
>>> a = mpf(0.25)
>>> z = mpf(0.75)
>>> bihyper([a], [], z)
(0.2454393389657273841385582 + 0.2454393389657273841385582j)
>>> hyper([a,1],[],z) + (hyper([1],[1-a],-1/z)-1)
(0.2454393389657273841385582 + 0.2454393389657273841385582j)
>>> hyper([a,1],[],z) + hyper([1],[2-a],-1/z)/z/(a-1)
(0.2454393389657273841385582 + 0.2454393389657273841385582j)
**References**
1. [Slater]_ (chapter 6: "Bilateral Series", pp. 180-189)
2. [Wikipedia]_ http://en.wikipedia.org/wiki/Bilateral_hypergeometric_series
"""
z = ctx.convert(z)
c_s = a_s + b_s
p = len(a_s)
q = len(b_s)
if (p, q) == (0,0) or (p, q) == (1,1):
return ctx.zero * z
neg = (p-q) % 2
def h(*c_s):
a_s = list(c_s[:p])
b_s = list(c_s[p:])
aa_s = [2-b for b in b_s]
bb_s = [2-a for a in a_s]
rp = [(-1)**neg * z] + [1-b for b in b_s] + [1-a for a in a_s]
rc = [-1] + [1]*len(b_s) + [-1]*len(a_s)
T1 = [], [], [], [], a_s + [1], b_s, z
T2 = rp, rc, [], [], aa_s + [1], bb_s, (-1)**neg / z
return T1, T2
return ctx.hypercomb(h, c_s, **kwargs)
# ---- end of file: AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/hypergeometric.py ----
from .functions import defun, defun_wrapped
@defun
def _jacobi_theta2(ctx, z, q):
extra1 = 10
extra2 = 20
# the loops below break when the fixed precision quantities
# a and b go to zero;
# right shifting small negative numbers by wp one obtains -1, not zero,
# so the condition a**2 + b**2 > MIN is used to break the loops.
MIN = 2
if z == ctx.zero:
if (not ctx._im(q)):
wp = ctx.prec + extra1
x = ctx.to_fixed(ctx._re(q), wp)
x2 = (x*x) >> wp
a = b = x2
s = x2
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
s += a
s = (1 << (wp+1)) + (s << 1)
s = ctx.ldexp(s, -wp)
else:
wp = ctx.prec + extra1
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp-1)
are = bre = x2re
aim = bim = x2im
sre = (1<<wp) + are
sim = aim
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
sre += are
sim += aim
sre = (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
else:
if (not ctx._im(q)) and (not ctx._im(z)):
wp = ctx.prec + extra1
x = ctx.to_fixed(ctx._re(q), wp)
x2 = (x*x) >> wp
a = b = x2
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
cn = c1 = ctx.to_fixed(c1, wp)
sn = s1 = ctx.to_fixed(s1, wp)
c2 = (c1*c1 - s1*s1) >> wp
s2 = (c1 * s1) >> (wp - 1)
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
s = c1 + ((a * cn) >> wp)
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
s += (a * cn) >> wp
s = (s << 1)
s = ctx.ldexp(s, -wp)
s *= ctx.nthroot(q, 4)
return s
# case z real, q complex
elif not ctx._im(z):
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = x2re
aim = bim = x2im
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
cn = c1 = ctx.to_fixed(c1, wp)
sn = s1 = ctx.to_fixed(s1, wp)
c2 = (c1*c1 - s1*s1) >> wp
s2 = (c1 * s1) >> (wp - 1)
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
sre = c1 + ((are * cn) >> wp)
sim = ((aim * cn) >> wp)
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
sre += ((are * cn) >> wp)
sim += ((aim * cn) >> wp)
sre = (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
#case z complex, q real
elif not ctx._im(q):
wp = ctx.prec + extra2
x = ctx.to_fixed(ctx._re(q), wp)
x2 = (x*x) >> wp
a = b = x2
prec0 = ctx.prec
ctx.prec = wp
c1, s1 = ctx.cos_sin(z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
#c2 = (c1*c1 - s1*s1) >> wp
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
#s2 = (c1 * s1) >> (wp - 1)
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
#cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
sre = c1re + ((a * cnre) >> wp)
sim = c1im + ((a * cnim) >> wp)
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
sre += ((a * cnre) >> wp)
sim += ((a * cnim) >> wp)
sre = (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
# case z and q complex
else:
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = x2re
aim = bim = x2im
prec0 = ctx.prec
ctx.prec = wp
# cos(z), sin(z) with z complex
c1, s1 = ctx.cos_sin(z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
            # The n=1 term of the series is c1; the n=3 term is a*cn.
            sre = c1re + ((are * cnre - aim * cnim) >> wp)
            sim = c1im + ((are * cnim + aim * cnre) >> wp)
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
#cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
                termre = (are * cnre - aim * cnim) >> wp
                termim = (aim * cnre + are * cnim) >> wp
                sre += termre
                sim += termim
sre = (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
s *= ctx.nthroot(q, 4)
return s
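# Added naive float reference (not in the original source) for the series that
# the fixed-point code above evaluates, valid for real 0 < q < 1:
#   theta2(z, q) = 2*q**(1/4) * sum_{n>=0} q**(n*(n+1)) * cos((2*n+1)*z)
def _theta2_naive_sketch(z, q, terms=40):
    import cmath
    s = sum(q**(n*(n+1)) * cmath.cos((2*n+1)*z) for n in range(terms))
    return 2 * q**0.25 * s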
@defun
def _djacobi_theta2(ctx, z, q, nd):
MIN = 2
extra1 = 10
extra2 = 20
if (not ctx._im(q)) and (not ctx._im(z)):
wp = ctx.prec + extra1
x = ctx.to_fixed(ctx._re(q), wp)
x2 = (x*x) >> wp
a = b = x2
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
cn = c1 = ctx.to_fixed(c1, wp)
sn = s1 = ctx.to_fixed(s1, wp)
c2 = (c1*c1 - s1*s1) >> wp
s2 = (c1 * s1) >> (wp - 1)
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
if (nd&1):
s = s1 + ((a * sn * 3**nd) >> wp)
else:
s = c1 + ((a * cn * 3**nd) >> wp)
n = 2
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
if nd&1:
s += (a * sn * (2*n+1)**nd) >> wp
else:
s += (a * cn * (2*n+1)**nd) >> wp
n += 1
s = -(s << 1)
s = ctx.ldexp(s, -wp)
# case z real, q complex
elif not ctx._im(z):
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = x2re
aim = bim = x2im
c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
cn = c1 = ctx.to_fixed(c1, wp)
sn = s1 = ctx.to_fixed(s1, wp)
c2 = (c1*c1 - s1*s1) >> wp
s2 = (c1 * s1) >> (wp - 1)
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
if (nd&1):
sre = s1 + ((are * sn * 3**nd) >> wp)
sim = ((aim * sn * 3**nd) >> wp)
else:
sre = c1 + ((are * cn * 3**nd) >> wp)
sim = ((aim * cn * 3**nd) >> wp)
n = 5
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
if (nd&1):
sre += ((are * sn * n**nd) >> wp)
sim += ((aim * sn * n**nd) >> wp)
else:
sre += ((are * cn * n**nd) >> wp)
sim += ((aim * cn * n**nd) >> wp)
n += 2
sre = -(sre << 1)
sim = -(sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
#case z complex, q real
elif not ctx._im(q):
wp = ctx.prec + extra2
x = ctx.to_fixed(ctx._re(q), wp)
x2 = (x*x) >> wp
a = b = x2
prec0 = ctx.prec
ctx.prec = wp
c1, s1 = ctx.cos_sin(z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
#c2 = (c1*c1 - s1*s1) >> wp
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
#s2 = (c1 * s1) >> (wp - 1)
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
#cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
if (nd&1):
sre = s1re + ((a * snre * 3**nd) >> wp)
sim = s1im + ((a * snim * 3**nd) >> wp)
else:
sre = c1re + ((a * cnre * 3**nd) >> wp)
sim = c1im + ((a * cnim * 3**nd) >> wp)
n = 5
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
if (nd&1):
sre += ((a * snre * n**nd) >> wp)
sim += ((a * snim * n**nd) >> wp)
else:
sre += ((a * cnre * n**nd) >> wp)
sim += ((a * cnim * n**nd) >> wp)
n += 2
sre = -(sre << 1)
sim = -(sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
# case z and q complex
else:
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = x2re
aim = bim = x2im
prec0 = ctx.prec
ctx.prec = wp
# cos(2*z), sin(2*z) with z complex
c1, s1 = ctx.cos_sin(z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
if (nd&1):
sre = s1re + (((are * snre - aim * snim) * 3**nd) >> wp)
sim = s1im + (((are * snim + aim * snre)* 3**nd) >> wp)
else:
sre = c1re + (((are * cnre - aim * cnim) * 3**nd) >> wp)
sim = c1im + (((are * cnim + aim * cnre)* 3**nd) >> wp)
n = 5
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
#cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
if (nd&1):
sre += (((are * snre - aim * snim) * n**nd) >> wp)
sim += (((aim * snre + are * snim) * n**nd) >> wp)
else:
sre += (((are * cnre - aim * cnim) * n**nd) >> wp)
sim += (((aim * cnre + are * cnim) * n**nd) >> wp)
n += 2
sre = -(sre << 1)
sim = -(sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
s *= ctx.nthroot(q, 4)
if (nd&1):
return (-1)**(nd//2) * s
else:
return (-1)**(1 + nd//2) * s
@defun
def _jacobi_theta3(ctx, z, q):
extra1 = 10
extra2 = 20
MIN = 2
if z == ctx.zero:
if not ctx._im(q):
wp = ctx.prec + extra1
x = ctx.to_fixed(ctx._re(q), wp)
s = x
a = b = x
x2 = (x*x) >> wp
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
s += a
s = (1 << wp) + (s << 1)
s = ctx.ldexp(s, -wp)
return s
else:
wp = ctx.prec + extra1
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
sre = are = bre = xre
sim = aim = bim = xim
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
sre += are
sim += aim
sre = (1 << wp) + (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
return s
else:
if (not ctx._im(q)) and (not ctx._im(z)):
s = 0
wp = ctx.prec + extra1
x = ctx.to_fixed(ctx._re(q), wp)
a = b = x
x2 = (x*x) >> wp
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
c1 = ctx.to_fixed(c1, wp)
s1 = ctx.to_fixed(s1, wp)
cn = c1
sn = s1
s += (a * cn) >> wp
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
s += (a * cn) >> wp
s = (1 << wp) + (s << 1)
s = ctx.ldexp(s, -wp)
return s
# case z real, q complex
elif not ctx._im(z):
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = xre
aim = bim = xim
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
c1 = ctx.to_fixed(c1, wp)
s1 = ctx.to_fixed(s1, wp)
cn = c1
sn = s1
sre = (are * cn) >> wp
sim = (aim * cn) >> wp
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
sre += (are * cn) >> wp
sim += (aim * cn) >> wp
sre = (1 << wp) + (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
return s
#case z complex, q real
elif not ctx._im(q):
wp = ctx.prec + extra2
x = ctx.to_fixed(ctx._re(q), wp)
a = b = x
x2 = (x*x) >> wp
prec0 = ctx.prec
ctx.prec = wp
c1, s1 = ctx.cos_sin(2*z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
sre = (a * cnre) >> wp
sim = (a * cnim) >> wp
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
sre += (a * cnre) >> wp
sim += (a * cnim) >> wp
sre = (1 << wp) + (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
return s
# case z and q complex
else:
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = xre
aim = bim = xim
prec0 = ctx.prec
ctx.prec = wp
# cos(2*z), sin(2*z) with z complex
c1, s1 = ctx.cos_sin(2*z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
sre = (are * cnre - aim * cnim) >> wp
sim = (aim * cnre + are * cnim) >> wp
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
sre += (are * cnre - aim * cnim) >> wp
sim += (aim * cnre + are * cnim) >> wp
sre = (1 << wp) + (sre << 1)
sim = (sim << 1)
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
return s
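# Added naive float reference (not in the original source) for theta3,
# valid for real 0 < q < 1:
#   theta3(z, q) = 1 + 2 * sum_{n>=1} q**(n*n) * cos(2*n*z)
def _theta3_naive_sketch(z, q, terms=40):
    import cmath
    return 1 + 2*sum(q**(n*n) * cmath.cos(2*n*z) for n in range(1, terms))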
@defun
def _djacobi_theta3(ctx, z, q, nd):
"""nd=1,2,3 order of the derivative with respect to z"""
MIN = 2
extra1 = 10
extra2 = 20
if (not ctx._im(q)) and (not ctx._im(z)):
s = 0
wp = ctx.prec + extra1
x = ctx.to_fixed(ctx._re(q), wp)
a = b = x
x2 = (x*x) >> wp
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
c1 = ctx.to_fixed(c1, wp)
s1 = ctx.to_fixed(s1, wp)
cn = c1
sn = s1
if (nd&1):
s += (a * sn) >> wp
else:
s += (a * cn) >> wp
n = 2
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
if nd&1:
s += (a * sn * n**nd) >> wp
else:
s += (a * cn * n**nd) >> wp
n += 1
s = -(s << (nd+1))
s = ctx.ldexp(s, -wp)
# case z real, q complex
elif not ctx._im(z):
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = xre
aim = bim = xim
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
c1 = ctx.to_fixed(c1, wp)
s1 = ctx.to_fixed(s1, wp)
cn = c1
sn = s1
if (nd&1):
sre = (are * sn) >> wp
sim = (aim * sn) >> wp
else:
sre = (are * cn) >> wp
sim = (aim * cn) >> wp
n = 2
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
if nd&1:
sre += (are * sn * n**nd) >> wp
sim += (aim * sn * n**nd) >> wp
else:
sre += (are * cn * n**nd) >> wp
sim += (aim * cn * n**nd) >> wp
n += 1
sre = -(sre << (nd+1))
sim = -(sim << (nd+1))
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
#case z complex, q real
elif not ctx._im(q):
wp = ctx.prec + extra2
x = ctx.to_fixed(ctx._re(q), wp)
a = b = x
x2 = (x*x) >> wp
prec0 = ctx.prec
ctx.prec = wp
c1, s1 = ctx.cos_sin(2*z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
if (nd&1):
sre = (a * snre) >> wp
sim = (a * snim) >> wp
else:
sre = (a * cnre) >> wp
sim = (a * cnim) >> wp
n = 2
while abs(a) > MIN:
b = (b*x2) >> wp
a = (a*b) >> wp
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
if (nd&1):
sre += (a * snre * n**nd) >> wp
sim += (a * snim * n**nd) >> wp
else:
sre += (a * cnre * n**nd) >> wp
sim += (a * cnim * n**nd) >> wp
n += 1
sre = -(sre << (nd+1))
sim = -(sim << (nd+1))
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
# case z and q complex
else:
wp = ctx.prec + extra2
xre = ctx.to_fixed(ctx._re(q), wp)
xim = ctx.to_fixed(ctx._im(q), wp)
x2re = (xre*xre - xim*xim) >> wp
x2im = (xre*xim) >> (wp - 1)
are = bre = xre
aim = bim = xim
prec0 = ctx.prec
ctx.prec = wp
# cos(2*z), sin(2*z) with z complex
c1, s1 = ctx.cos_sin(2*z)
ctx.prec = prec0
cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
if (nd&1):
sre = (are * snre - aim * snim) >> wp
sim = (aim * snre + are * snim) >> wp
else:
sre = (are * cnre - aim * cnim) >> wp
sim = (aim * cnre + are * cnim) >> wp
n = 2
while are**2 + aim**2 > MIN:
bre, bim = (bre * x2re - bim * x2im) >> wp, \
(bre * x2im + bim * x2re) >> wp
are, aim = (are * bre - aim * bim) >> wp, \
(are * bim + aim * bre) >> wp
t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
cnre = t1
cnim = t2
snre = t3
snim = t4
if(nd&1):
sre += ((are * snre - aim * snim) * n**nd) >> wp
sim += ((aim * snre + are * snim) * n**nd) >> wp
else:
sre += ((are * cnre - aim * cnim) * n**nd) >> wp
sim += ((aim * cnre + are * cnim) * n**nd) >> wp
n += 1
sre = -(sre << (nd+1))
sim = -(sim << (nd+1))
sre = ctx.ldexp(sre, -wp)
sim = ctx.ldexp(sim, -wp)
s = ctx.mpc(sre, sim)
if (nd&1):
return (-1)**(nd//2) * s
else:
return (-1)**(1 + nd//2) * s
@defun
def _jacobi_theta2a(ctx, z, q):
"""
case ctx._im(z) != 0
theta(2, z, q) =
    q**(1/4) * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=-inf, inf)
    max term where (2*n+1)*log(q).real - 2*ctx._im(z) ~= 0, i.e. at
    n0 = int(ctx._im(z)/log(q).real - 1/2)
    theta(2, z, q) =
    q**(1/4) * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=n0, inf) +
    q**(1/4) * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n, n0-1, -inf)
"""
n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2)
e2 = ctx.expj(2*z)
e = e0 = ctx.expj((2*n+1)*z)
a = q**(n*n + n)
# leading term
term = a * e
s = term
eps1 = ctx.eps*abs(term)
while 1:
n += 1
e = e * e2
term = q**(n*n + n) * e
if abs(term) < eps1:
break
s += term
e = e0
e2 = ctx.expj(-2*z)
n = n0
while 1:
n -= 1
e = e * e2
term = q**(n*n + n) * e
if abs(term) < eps1:
break
s += term
s = s * ctx.nthroot(q, 4)
return s
@defun
def _jacobi_theta3a(ctx, z, q):
"""
case ctx._im(z) != 0
theta3(z, q) = Sum(q**(n*n) * exp(j*2*n*z), n, -inf, inf)
max term for n*abs(log(q).real) + ctx._im(z) ~= 0
n0 = int(- ctx._im(z)/abs(log(q).real))
"""
n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q))))
e2 = ctx.expj(2*z)
e = e0 = ctx.expj(2*n*z)
s = term = q**(n*n) * e
eps1 = ctx.eps*abs(term)
while 1:
n += 1
e = e * e2
term = q**(n*n) * e
if abs(term) < eps1:
break
s += term
e = e0
e2 = ctx.expj(-2*z)
n = n0
while 1:
n -= 1
e = e * e2
term = q**(n*n) * e
if abs(term) < eps1:
break
s += term
return s
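# Added schematic helper (not in the original source) showing the two-sided
# summation strategy used by the *a variants above: start at the index n0 of
# the (approximately) largest term and extend in both directions until the
# terms drop below the tolerance.
def _twosided_sum_sketch(term, n0, eps=1e-17):
    s = term(n0)
    for step in (1, -1):
        n = n0 + step
        while True:
            t = term(n)
            if abs(t) < eps:
                break
            s += t
            n += step
    return s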
@defun
def _djacobi_theta2a(ctx, z, q, nd):
"""
case ctx._im(z) != 0
dtheta(2, z, q, nd) =
    j * q**(1/4) * Sum(q**(n*n + n) * (2*n+1)*exp(j*(2*n + 1)*z), n=-inf, inf)
    max term for (2*n0+1)*log(q).real - 2*ctx._im(z) ~= 0
n0 = int(ctx._im(z)/log(q).real - 1/2)
"""
n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2)
e2 = ctx.expj(2*z)
e = e0 = ctx.expj((2*n + 1)*z)
a = q**(n*n + n)
# leading term
term = (2*n+1)**nd * a * e
s = term
eps1 = ctx.eps*abs(term)
while 1:
n += 1
e = e * e2
term = (2*n+1)**nd * q**(n*n + n) * e
if abs(term) < eps1:
break
s += term
e = e0
e2 = ctx.expj(-2*z)
n = n0
while 1:
n -= 1
e = e * e2
term = (2*n+1)**nd * q**(n*n + n) * e
if abs(term) < eps1:
break
s += term
return ctx.j**nd * s * ctx.nthroot(q, 4)
@defun
def _djacobi_theta3a(ctx, z, q, nd):
"""
case ctx._im(z) != 0
djtheta3(z, q, nd) = (2*j)**nd *
Sum(q**(n*n) * n**nd * exp(j*2*n*z), n, -inf, inf)
max term for minimum n*abs(log(q).real) + ctx._im(z)
"""
n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q))))
e2 = ctx.expj(2*z)
e = e0 = ctx.expj(2*n*z)
a = q**(n*n) * e
s = term = n**nd * a
if n != 0:
eps1 = ctx.eps*abs(term)
else:
eps1 = ctx.eps*abs(a)
while 1:
n += 1
e = e * e2
a = q**(n*n) * e
term = n**nd * a
if n != 0:
aterm = abs(term)
else:
aterm = abs(a)
if aterm < eps1:
break
s += term
e = e0
e2 = ctx.expj(-2*z)
n = n0
while 1:
n -= 1
e = e * e2
a = q**(n*n) * e
term = n**nd * a
if n != 0:
aterm = abs(term)
else:
aterm = abs(a)
if aterm < eps1:
break
s += term
return (2*ctx.j)**nd * s
@defun
def jtheta(ctx, n, z, q, derivative=0):
if derivative:
return ctx._djtheta(n, z, q, derivative)
z = ctx.convert(z)
q = ctx.convert(q)
# Implementation note
# If ctx._im(z) is close to zero, _jacobi_theta2 and _jacobi_theta3
# are used,
# which compute the series starting from n=0 using fixed precision
# numbers;
# otherwise _jacobi_theta2a and _jacobi_theta3a are used, which compute
# the series starting from n=n0, which is the largest term.
# TODO: write _jacobi_theta2a and _jacobi_theta3a using fixed-point
if abs(q) > ctx.THETA_Q_LIM:
raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM)
extra = 10
if z:
M = ctx.mag(z)
if M > 5 or (n == 1 and M < -5):
extra += 2*abs(M)
cz = 0.5
extra2 = 50
prec0 = ctx.prec
try:
ctx.prec += extra
if n == 1:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._jacobi_theta2(z - ctx.pi/2, q)
else:
ctx.dps += 10
res = ctx._jacobi_theta2a(z - ctx.pi/2, q)
else:
res = ctx._jacobi_theta2(z - ctx.pi/2, q)
elif n == 2:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._jacobi_theta2(z, q)
else:
ctx.dps += 10
res = ctx._jacobi_theta2a(z, q)
else:
res = ctx._jacobi_theta2(z, q)
elif n == 3:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._jacobi_theta3(z, q)
else:
ctx.dps += 10
res = ctx._jacobi_theta3a(z, q)
else:
res = ctx._jacobi_theta3(z, q)
elif n == 4:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._jacobi_theta3(z, -q)
else:
ctx.dps += 10
res = ctx._jacobi_theta3a(z, -q)
else:
res = ctx._jacobi_theta3(z, -q)
else:
raise ValueError
finally:
ctx.prec = prec0
return res
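# Added usage sketch (not in the original source), assuming the top-level API:
# jtheta(3, 0, q) equals the two-sided sum of q**(n*n) over all integers n.
def _jtheta3_series_sketch(q=0.1):
    from mpmath import jtheta, nsum, inf
    return jtheta(3, 0, q) - nsum(lambda n: q**(n*n), [-inf, inf])  # ~ 0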
@defun
def _djtheta(ctx, n, z, q, derivative=1):
z = ctx.convert(z)
q = ctx.convert(q)
nd = int(derivative)
if abs(q) > ctx.THETA_Q_LIM:
raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM)
extra = 10 + ctx.prec * nd // 10
if z:
M = ctx.mag(z)
if M > 5 or (n != 1 and M < -5):
extra += 2*abs(M)
cz = 0.5
extra2 = 50
prec0 = ctx.prec
try:
ctx.prec += extra
if n == 1:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd)
else:
ctx.dps += 10
res = ctx._djacobi_theta2a(z - ctx.pi/2, q, nd)
else:
res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd)
elif n == 2:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._djacobi_theta2(z, q, nd)
else:
ctx.dps += 10
res = ctx._djacobi_theta2a(z, q, nd)
else:
res = ctx._djacobi_theta2(z, q, nd)
elif n == 3:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._djacobi_theta3(z, q, nd)
else:
ctx.dps += 10
res = ctx._djacobi_theta3a(z, q, nd)
else:
res = ctx._djacobi_theta3(z, q, nd)
elif n == 4:
if ctx._im(z):
if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
ctx.dps += extra2
res = ctx._djacobi_theta3(z, -q, nd)
else:
ctx.dps += 10
res = ctx._djacobi_theta3a(z, -q, nd)
else:
res = ctx._djacobi_theta3(z, -q, nd)
else:
raise ValueError
finally:
ctx.prec = prec0
return +res
# ---- end of file: AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/theta.py ----
from ..libmp.backend import xrange
from .functions import defun, defun_wrapped
@defun
def gammaprod(ctx, a, b, _infsign=False):
a = [ctx.convert(x) for x in a]
b = [ctx.convert(x) for x in b]
poles_num = []
poles_den = []
regular_num = []
regular_den = []
for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x)
for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x)
# One more pole in numerator or denominator gives 0 or inf
if len(poles_num) < len(poles_den): return ctx.zero
if len(poles_num) > len(poles_den):
# Get correct sign of infinity for x+h, h -> 0 from above
# XXX: hack, this should be done properly
if _infsign:
a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num]
b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den]
return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf
else:
return ctx.inf
# All poles cancel
# lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i)
p = ctx.one
orig = ctx.prec
try:
ctx.prec = orig + 15
while poles_num:
i = poles_num.pop()
j = poles_den.pop()
p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i)
for x in regular_num: p *= ctx.gamma(x)
for x in regular_den: p /= ctx.gamma(x)
finally:
ctx.prec = orig
return +p
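# Added hedged check (not in the original source): when poles cancel,
# gammaprod evaluates the limit, e.g. gamma(x)/gamma(x-1) = x-1 at x = -3
# even though both gamma factors are individually infinite.
def _gammaprod_pole_sketch():
    from mpmath import gammaprod
    return gammaprod([-3], [-4])  # expect -4.0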
@defun
def beta(ctx, x, y):
x = ctx.convert(x)
y = ctx.convert(y)
if ctx.isinf(y):
x, y = y, x
if ctx.isinf(x):
if x == ctx.inf and not ctx._im(y):
if y == ctx.ninf:
return ctx.nan
if y > 0:
return ctx.zero
if ctx.isint(y):
return ctx.nan
if y < 0:
return ctx.sign(ctx.gamma(y)) * ctx.inf
return ctx.nan
return ctx.gammaprod([x, y], [x+y])
@defun
def binomial(ctx, n, k):
return ctx.gammaprod([n+1], [k+1, n-k+1])
@defun
def rf(ctx, x, n):
return ctx.gammaprod([x+n], [x])
@defun
def ff(ctx, x, n):
return ctx.gammaprod([x+1], [x-n+1])
@defun_wrapped
def fac2(ctx, x):
if ctx.isinf(x):
if x == ctx.inf:
return x
return ctx.nan
return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1)
@defun_wrapped
def barnesg(ctx, z):
if ctx.isinf(z):
if z == ctx.inf:
return z
return ctx.nan
if ctx.isnan(z):
return z
if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)):
return z*0
# Account for size (would not be needed if computing log(G))
if abs(z) > 5:
ctx.dps += 2*ctx.log(abs(z),2)
# Reflection formula
if ctx.re(z) < -ctx.dps:
w = 1-z
pi2 = 2*ctx.pi
u = ctx.expjpi(2*w)
v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \
ctx.j*ctx.polylog(2, u)/pi2
v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w
if ctx._is_real_type(z):
v = ctx._re(v)
return v
# Estimate terms for asymptotic expansion
# TODO: fixme, obviously
N = ctx.dps // 2 + 5
G = 1
while abs(z) < N or ctx.re(z) < 1:
G /= ctx.gamma(z)
z += 1
z -= 1
s = ctx.mpf(1)/12
s -= ctx.log(ctx.glaisher)
s += z*ctx.log(2*ctx.pi)/2
s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z)
s -= 3*z**2/4
z2k = z2 = z**2
for k in xrange(1, N+1):
t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k)
if abs(t) < ctx.eps:
#print k, N # check how many terms were needed
break
z2k *= z2
s += t
#if k == N:
# print "warning: series for barnesg failed to converge", ctx.dps
return G*ctx.exp(s)
@defun
def superfac(ctx, z):
return ctx.barnesg(z+2)
@defun_wrapped
def hyperfac(ctx, z):
# XXX: estimate needed extra bits accurately
if z == ctx.inf:
return z
if abs(z) > 5:
extra = 4*int(ctx.log(abs(z),2))
else:
extra = 0
ctx.prec += extra
if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)):
n = int(ctx.re(z))
h = ctx.hyperfac(-n-1)
if ((n+1)//2) & 1:
h = -h
if ctx._is_complex_type(z):
return h + 0j
return h
zp1 = z+1
# Wrong branch cut
#v = ctx.gamma(zp1)**z
#ctx.prec -= extra
#return v / ctx.barnesg(zp1)
v = ctx.exp(z*ctx.loggamma(zp1))
ctx.prec -= extra
return v / ctx.barnesg(zp1)
@defun_wrapped
def loggamma_old(ctx, z):
a = ctx._re(z)
b = ctx._im(z)
if not b and a > 0:
return ctx.ln(ctx.gamma_old(z))
u = ctx.arg(z)
w = ctx.ln(ctx.gamma_old(z))
if b:
gi = -b - u/2 + a*u + b*ctx.ln(abs(z))
n = ctx.floor((gi-ctx._im(w))/(2*ctx.pi)+0.5) * (2*ctx.pi)
return w + n*ctx.j
elif a < 0:
n = int(ctx.floor(a))
w += (n-(n%2))*ctx.pi*ctx.j
return w
'''
@defun
def psi0(ctx, z):
"""Shortcut for psi(0,z) (the digamma function)"""
return ctx.psi(0, z)
@defun
def psi1(ctx, z):
"""Shortcut for psi(1,z) (the trigamma function)"""
return ctx.psi(1, z)
@defun
def psi2(ctx, z):
"""Shortcut for psi(2,z) (the tetragamma function)"""
return ctx.psi(2, z)
@defun
def psi3(ctx, z):
"""Shortcut for psi(3,z) (the pentagamma function)"""
return ctx.psi(3, z)
'''
# ---- end of file: AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/factorials.py ----
from .functions import defun, defun_wrapped
def find_rosser_block_zero(ctx, n):
"""for n<400 000 000 determines a block were one find our zero"""
for k in range(len(_ROSSER_EXCEPTIONS)//2):
a=_ROSSER_EXCEPTIONS[2*k][0]
b=_ROSSER_EXCEPTIONS[2*k][1]
if ((a<= n-2) and (n-1 <= b)):
t0 = ctx.grampoint(a)
t1 = ctx.grampoint(b)
v0 = ctx._fp.siegelz(t0)
v1 = ctx._fp.siegelz(t1)
my_zero_number = n-a-1
zero_number_block = b-a
pattern = _ROSSER_EXCEPTIONS[2*k+1]
return (my_zero_number, [a,b], [t0,t1], [v0,v1])
k = n-2
t,v,b = compute_triple_tvb(ctx, k)
T = [t]
V = [v]
while b < 0:
k -= 1
t,v,b = compute_triple_tvb(ctx, k)
T.insert(0,t)
V.insert(0,v)
my_zero_number = n-k-1
m = n-1
t,v,b = compute_triple_tvb(ctx, m)
T.append(t)
V.append(v)
while b < 0:
m += 1
t,v,b = compute_triple_tvb(ctx, m)
T.append(t)
V.append(v)
return (my_zero_number, [k,m], T, V)
def wpzeros(t):
"""Precision needed to compute higher zeros"""
wp = 53
if t > 3*10**8:
wp = 63
if t > 10**11:
wp = 70
if t > 10**14:
wp = 83
return wp
def separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=None,
fp_tolerance=None):
"""Separate the zeros contained in the block T, limitloop
determines how long one must search"""
if limitloop is None:
limitloop = ctx.inf
loopnumber = 0
variations = count_variations(V)
while ((variations < zero_number_block) and (loopnumber <limitloop)):
a = T[0]
v = V[0]
newT = [a]
newV = [v]
variations = 0
for n in range(1,len(T)):
b2 = T[n]
u = V[n]
if (u*v>0):
alpha = ctx.sqrt(u/v)
b= (alpha*a+b2)/(alpha+1)
else:
b = (a+b2)/2
if fp_tolerance < 10:
w = ctx._fp.siegelz(b)
if abs(w)<fp_tolerance:
w = ctx.siegelz(b)
else:
w=ctx.siegelz(b)
if v*w<0:
variations += 1
newT.append(b)
newV.append(w)
u = V[n]
if u*w <0:
variations += 1
newT.append(b2)
newV.append(u)
a = b2
v = u
T = newT
V = newV
loopnumber +=1
        if (limitloop > ITERATION_LIMIT) and (loopnumber > 2) and (variations + 2 == zero_number_block):
dtMax=0
dtSec=0
kMax = 0
for k1 in range(1,len(T)):
dt = T[k1]-T[k1-1]
if dt > dtMax:
kMax=k1
dtSec = dtMax
dtMax = dt
elif (dt<dtMax) and(dt >dtSec):
dtSec = dt
if dtMax>3*dtSec:
f = lambda x: ctx.rs_z(x,derivative=1)
t0=T[kMax-1]
t1 = T[kMax]
t=ctx.findroot(f, (t0,t1), solver ='illinois',verify=False, verbose=False)
v = ctx.siegelz(t)
if (t0<t) and (t<t1) and (v*V[kMax]<0):
T.insert(kMax,t)
V.insert(kMax,v)
variations = count_variations(V)
if variations == zero_number_block:
separated = True
else:
separated = False
return (T,V, separated)
def separate_my_zero(ctx, my_zero_number, zero_number_block, T, V, prec):
"""If we know which zero of this block is mine,
the function separates the zero"""
variations = 0
v0 = V[0]
for k in range(1,len(V)):
v1 = V[k]
if v0*v1 < 0:
variations +=1
if variations == my_zero_number:
k0 = k
leftv = v0
rightv = v1
v0 = v1
t1 = T[k0]
t0 = T[k0-1]
ctx.prec = prec
r = ctx.findroot(lambda x:ctx.siegelz(x), (t0,t1), solver ='illinois', verbose=False)
return r
def sure_number_block(ctx, n):
"""The number of good Rosser blocks needed to apply
Turing method
References:
R. P. Brent, On the Zeros of the Riemann Zeta Function
in the Critical Strip, Math. Comp. 33 (1979) 1361--1372
T. Trudgian, Improvements to Turing Method, Math. Comp."""
if n < 9*10**5:
        return 2
g = ctx.grampoint(n-100)
lg = ctx._fp.ln(g)
brent = 0.0061 * lg**2 +0.08*lg
trudgian = 0.0031 * lg**2 +0.11*lg
N = ctx.ceil(min(brent,trudgian))
N = int(N)
return N
def compute_triple_tvb(ctx, n):
t = ctx.grampoint(n)
v = ctx._fp.siegelz(t)
if ctx.mag(abs(v))<ctx.mag(t)-45:
v = ctx.siegelz(t)
b = v*(-1)**n
return t,v,b
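# Added illustrative note (not in the original source): the sign b returned
# above encodes Gram's law, which says that (-1)**n * Z(g_n) is usually
# positive at the n-th Gram point g_n.
def _gram_law_sketch(n=100):
    from mpmath import grampoint, siegelz
    return (-1)**n * siegelz(grampoint(n))  # positive when Gram's law holds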
ITERATION_LIMIT = 4
def search_supergood_block(ctx, n, fp_tolerance):
"""To use for n>400 000 000"""
sb = sure_number_block(ctx, n)
number_goodblocks = 0
m2 = n-1
t, v, b = compute_triple_tvb(ctx, m2)
Tf = [t]
Vf = [v]
while b < 0:
m2 += 1
t,v,b = compute_triple_tvb(ctx, m2)
Tf.append(t)
Vf.append(v)
goodpoints = [m2]
T = [t]
V = [v]
while number_goodblocks < 2*sb:
m2 += 1
t, v, b = compute_triple_tvb(ctx, m2)
T.append(t)
V.append(v)
while b < 0:
m2 += 1
t,v,b = compute_triple_tvb(ctx, m2)
T.append(t)
V.append(v)
goodpoints.append(m2)
zn = len(T)-1
A, B, separated =\
separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT,
fp_tolerance=fp_tolerance)
Tf.pop()
Tf.extend(A)
Vf.pop()
Vf.extend(B)
if separated:
number_goodblocks += 1
else:
number_goodblocks = 0
T = [t]
V = [v]
# Now the same procedure to the left
number_goodblocks = 0
m2 = n-2
t, v, b = compute_triple_tvb(ctx, m2)
Tf.insert(0,t)
Vf.insert(0,v)
while b < 0:
m2 -= 1
t,v,b = compute_triple_tvb(ctx, m2)
Tf.insert(0,t)
Vf.insert(0,v)
goodpoints.insert(0,m2)
T = [t]
V = [v]
while number_goodblocks < 2*sb:
m2 -= 1
t, v, b = compute_triple_tvb(ctx, m2)
T.insert(0,t)
V.insert(0,v)
while b < 0:
m2 -= 1
t,v,b = compute_triple_tvb(ctx, m2)
T.insert(0,t)
V.insert(0,v)
goodpoints.insert(0,m2)
zn = len(T)-1
A, B, separated =\
separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance)
A.pop()
Tf = A+Tf
B.pop()
Vf = B+Vf
if separated:
number_goodblocks += 1
else:
number_goodblocks = 0
T = [t]
V = [v]
r = goodpoints[2*sb]
lg = len(goodpoints)
s = goodpoints[lg-2*sb-1]
tr, vr, br = compute_triple_tvb(ctx, r)
ar = Tf.index(tr)
ts, vs, bs = compute_triple_tvb(ctx, s)
as1 = Tf.index(ts)
T = Tf[ar:as1+1]
V = Vf[ar:as1+1]
zn = s-r
A, B, separated =\
separate_zeros_in_block(ctx, zn,T,V,limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance)
if separated:
return (n-r-1,[r,s],A,B)
q = goodpoints[sb]
lg = len(goodpoints)
t = goodpoints[lg-sb-1]
tq, vq, bq = compute_triple_tvb(ctx, q)
aq = Tf.index(tq)
tt, vt, bt = compute_triple_tvb(ctx, t)
at = Tf.index(tt)
T = Tf[aq:at+1]
V = Vf[aq:at+1]
return (n-q-1,[q,t],T,V)
def count_variations(V):
count = 0
vold = V[0]
for n in range(1, len(V)):
vnew = V[n]
if vold*vnew < 0:
count +=1
vold = vnew
return count
def pattern_construct(ctx, block, T, V):
pattern = '('
a = block[0]
b = block[1]
t0,v0,b0 = compute_triple_tvb(ctx, a)
k = 0
k0 = 0
for n in range(a+1,b+1):
t1,v1,b1 = compute_triple_tvb(ctx, n)
lgT =len(T)
while (k < lgT) and (T[k] <= t1):
k += 1
L = V[k0:k]
L.append(v1)
L.insert(0,v0)
count = count_variations(L)
pattern = pattern + ("%s" % count)
if b1 > 0:
pattern = pattern + ')('
k0 = k
t0,v0,b0 = t1,v1,b1
pattern = pattern[:-1]
return pattern
@defun
def zetazero(ctx, n, info=False, round=True):
r"""
Computes the `n`-th nontrivial zero of `\zeta(s)` on the critical line,
    i.e. returns an approximation of the `n`-th complex number
    `s = \frac{1}{2} + ti`, ordered by increasing `t > 0`, for which
    `\zeta(s) = 0`. Equivalently, the
imaginary part `t` is a zero of the Z-function (:func:`~mpmath.siegelz`).
**Examples**
The first few zeros::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> zetazero(1)
(0.5 + 14.13472514173469379045725j)
>>> zetazero(2)
(0.5 + 21.02203963877155499262848j)
>>> zetazero(20)
(0.5 + 77.14484006887480537268266j)
Verifying that the values are zeros::
>>> for n in range(1,5):
... s = zetazero(n)
... chop(zeta(s)), chop(siegelz(s.imag))
...
(0.0, 0.0)
(0.0, 0.0)
(0.0, 0.0)
(0.0, 0.0)
Negative indices give the conjugate zeros (`n = 0` is undefined)::
>>> zetazero(-1)
(0.5 - 14.13472514173469379045725j)
:func:`~mpmath.zetazero` supports arbitrarily large `n` and arbitrary precision::
>>> mp.dps = 15
>>> zetazero(1234567)
(0.5 + 727690.906948208j)
>>> mp.dps = 50
>>> zetazero(1234567)
(0.5 + 727690.9069482075392389420041147142092708393819935j)
>>> chop(zeta(_)/_)
0.0
with *info=True*, :func:`~mpmath.zetazero` gives additional information::
>>> mp.dps = 15
>>> zetazero(542964976,info=True)
((0.5 + 209039046.578535j), [542964969, 542964978], 6, '(013111110)')
This means that the zero is between Gram points 542964969 and 542964978;
    it is the 6-th zero between them. Finally, (013111110) is the pattern
    of zeros in this interval. The numbers indicate the number of zeros
    in each Gram interval (Rosser blocks between parentheses). In this case
there is only one Rosser block of length nine.
"""
n = int(n)
if n < 0:
return ctx.zetazero(-n).conjugate()
if n == 0:
raise ValueError("n must be nonzero")
wpinitial = ctx.prec
try:
wpz, fp_tolerance = comp_fp_tolerance(ctx, n)
ctx.prec = wpz
if n < 400000000:
my_zero_number, block, T, V =\
find_rosser_block_zero(ctx, n)
else:
my_zero_number, block, T, V =\
search_supergood_block(ctx, n, fp_tolerance)
zero_number_block = block[1]-block[0]
T, V, separated = separate_zeros_in_block(ctx, zero_number_block, T, V,
limitloop=ctx.inf, fp_tolerance=fp_tolerance)
if info:
pattern = pattern_construct(ctx,block,T,V)
prec = max(wpinitial, wpz)
t = separate_my_zero(ctx, my_zero_number, zero_number_block,T,V,prec)
v = ctx.mpc(0.5,t)
finally:
ctx.prec = wpinitial
if round:
        v = +v
if info:
return (v,block,my_zero_number,pattern)
else:
return v
def gram_index(ctx, t):
if t > 10**13:
wp = 3*ctx.log(t, 10)
else:
wp = 0
prec = ctx.prec
try:
ctx.prec += wp
x0 = (t/(2*ctx.pi))*ctx.log(t/(2*ctx.pi))
h = ctx.findroot(lambda x:ctx.siegeltheta(t)-ctx.pi*x, x0)
h = int(h)
finally:
ctx.prec = prec
    return h
def count_to(ctx, t, T, V):
count = 0
vold = V[0]
told = T[0]
tnew = T[1]
k = 1
while tnew < t:
vnew = V[k]
if vold*vnew < 0:
count += 1
vold = vnew
k += 1
tnew = T[k]
a = ctx.siegelz(t)
if a*vold < 0:
count += 1
return count
def comp_fp_tolerance(ctx, n):
wpz = wpzeros(n*ctx.log(n))
if n < 15*10**8:
fp_tolerance = 0.0005
elif n <= 10**14:
fp_tolerance = 0.1
else:
fp_tolerance = 100
return wpz, fp_tolerance
@defun
def nzeros(ctx, t):
r"""
Computes the number of zeros of the Riemann zeta function in
`(0,1) \times (0,t]`, usually denoted by `N(t)`.
**Examples**
The first zero has imaginary part between 14 and 15::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nzeros(14)
0
>>> nzeros(15)
1
>>> zetazero(1)
(0.5 + 14.1347251417347j)
Some closely spaced zeros::
>>> nzeros(10**7)
21136125
>>> zetazero(21136125)
(0.5 + 9999999.32718175j)
>>> zetazero(21136126)
(0.5 + 10000000.2400236j)
>>> nzeros(545439823.215)
1500000001
>>> zetazero(1500000001)
(0.5 + 545439823.201985j)
>>> zetazero(1500000002)
(0.5 + 545439823.325697j)
This confirms the data given by J. van de Lune,
H. J. J. te Riele and D. T. Winter in 1986.
"""
if t < 14.1347251417347:
return 0
x = gram_index(ctx, t)
k = int(ctx.floor(x))
wpinitial = ctx.prec
wpz, fp_tolerance = comp_fp_tolerance(ctx, k)
ctx.prec = wpz
a = ctx.siegelz(t)
if k == -1 and a < 0:
return 0
elif k == -1 and a > 0:
return 1
if k+2 < 400000000:
Rblock = find_rosser_block_zero(ctx, k+2)
else:
Rblock = search_supergood_block(ctx, k+2, fp_tolerance)
n1, n2 = Rblock[1]
if n2-n1 == 1:
b = Rblock[3][0]
if a*b > 0:
ctx.prec = wpinitial
return k+1
else:
ctx.prec = wpinitial
return k+2
my_zero_number,block, T, V = Rblock
zero_number_block = n2-n1
T, V, separated = separate_zeros_in_block(ctx,\
zero_number_block, T, V,\
limitloop=ctx.inf,\
fp_tolerance=fp_tolerance)
n = count_to(ctx, t, T, V)
ctx.prec = wpinitial
return n+n1+1
@defun_wrapped
def backlunds(ctx, t):
r"""
Computes the function
`S(t) = \operatorname{arg} \zeta(\frac{1}{2} + it) / \pi`.
See Titchmarsh Section 9.3 for details of the definition.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> backlunds(217.3)
0.16302205431184
Generally, the value is a small number. At Gram points it is an integer,
frequently equal to 0::
>>> chop(backlunds(grampoint(200)))
0.0
>>> backlunds(extraprec(10)(grampoint)(211))
1.0
>>> backlunds(extraprec(10)(grampoint)(232))
-1.0
The number of zeros of the Riemann zeta function up to height `t`
    satisfies `N(t) = \theta(t)/\pi + 1 + S(t)` (see :func:`nzeros` and
:func:`siegeltheta`)::
>>> t = 1234.55
>>> nzeros(t)
842
>>> siegeltheta(t)/pi+1+backlunds(t)
842.0
"""
return ctx.nzeros(t)-1-ctx.siegeltheta(t)/ctx.pi
"""
_ROSSER_EXCEPTIONS is a list of all exceptions to
Rosser's rule for n <= 400 000 000.
The entries alternate between a block [n, m] and a string.
The string is the zero pattern of the block and the relevant
adjacent Gram interval. For example, '(010)3' corresponds to a block
composed of three Gram intervals, the first and third without
a zero and the intermediate one with a zero. The next Gram interval
contains three zeros, so in total we have 4 zeros in 4 Gram
intervals. n and m are the indices of the Gram points of this
interval of four Gram intervals. The Rosser exception proper is
formed by the three Gram intervals that are signaled between
parentheses.
We have also included some exceptions to Rosser's rule beyond
n = 400 000 000 that are noted in the literature for one reason
or another.
The list is composed from the data published in the references:
R. P. Brent, J. van de Lune, H. J. J. te Riele, D. T. Winter,
'On the Zeros of the Riemann Zeta Function in the Critical Strip. II',
Math. Comp. 39 (1982) 681--688.
See also Corrigenda in Math. Comp. 46 (1986) 771.
J. van de Lune, H. J. J. te Riele,
'On the Zeros of the Riemann Zeta Function in the Critical Strip. III',
Math. Comp. 41 (1983) 759--767.
See also Corrigenda in Math. Comp. 46 (1986) 771.
J. van de Lune,
'Sums of Equal Powers of Positive Integers',
Dissertation,
Vrije Universiteit te Amsterdam, Centrum voor Wiskunde en Informatica,
Amsterdam, 1984.
Thanks to the authors of all these papers and to the others who have
contributed to making this possible.
"""
_ROSSER_EXCEPTIONS = \
[[13999525, 13999528], '(00)3',
[30783329, 30783332], '(00)3',
[30930926, 30930929], '3(00)',
[37592215, 37592218], '(00)3',
[40870156, 40870159], '(00)3',
[43628107, 43628110], '(00)3',
[46082042, 46082045], '(00)3',
[46875667, 46875670], '(00)3',
[49624540, 49624543], '3(00)',
[50799238, 50799241], '(00)3',
[55221453, 55221456], '3(00)',
[56948779, 56948782], '3(00)',
[60515663, 60515666], '(00)3',
[61331766, 61331770], '(00)40',
[69784843, 69784846], '3(00)',
[75052114, 75052117], '(00)3',
[79545240, 79545243], '3(00)',
[79652247, 79652250], '3(00)',
[83088043, 83088046], '(00)3',
[83689522, 83689525], '3(00)',
[85348958, 85348961], '(00)3',
[86513820, 86513823], '(00)3',
[87947596, 87947599], '3(00)',
[88600095, 88600098], '(00)3',
[93681183, 93681186], '(00)3',
[100316551, 100316554], '3(00)',
[100788444, 100788447], '(00)3',
[106236172, 106236175], '(00)3',
[106941327, 106941330], '3(00)',
[107287955, 107287958], '(00)3',
[107532016, 107532019], '3(00)',
[110571044, 110571047], '(00)3',
[111885253, 111885256], '3(00)',
[113239783, 113239786], '(00)3',
[120159903, 120159906], '(00)3',
[121424391, 121424394], '3(00)',
[121692931, 121692934], '3(00)',
[121934170, 121934173], '3(00)',
[122612848, 122612851], '3(00)',
[126116567, 126116570], '(00)3',
[127936513, 127936516], '(00)3',
[128710277, 128710280], '3(00)',
[129398902, 129398905], '3(00)',
[130461096, 130461099], '3(00)',
[131331947, 131331950], '3(00)',
[137334071, 137334074], '3(00)',
[137832603, 137832606], '(00)3',
[138799471, 138799474], '3(00)',
[139027791, 139027794], '(00)3',
[141617806, 141617809], '(00)3',
[144454931, 144454934], '(00)3',
[145402379, 145402382], '3(00)',
[146130245, 146130248], '3(00)',
[147059770, 147059773], '(00)3',
[147896099, 147896102], '3(00)',
[151097113, 151097116], '(00)3',
[152539438, 152539441], '(00)3',
[152863168, 152863171], '3(00)',
[153522726, 153522729], '3(00)',
[155171524, 155171527], '3(00)',
[155366607, 155366610], '(00)3',
[157260686, 157260689], '3(00)',
[157269224, 157269227], '(00)3',
[157755123, 157755126], '(00)3',
[158298484, 158298487], '3(00)',
[160369050, 160369053], '3(00)',
[162962787, 162962790], '(00)3',
[163724709, 163724712], '(00)3',
[164198113, 164198116], '3(00)',
[164689301, 164689305], '(00)40',
[164880228, 164880231], '3(00)',
[166201932, 166201935], '(00)3',
[168573836, 168573839], '(00)3',
[169750763, 169750766], '(00)3',
[170375507, 170375510], '(00)3',
[170704879, 170704882], '3(00)',
[172000992, 172000995], '3(00)',
[173289941, 173289944], '(00)3',
[173737613, 173737616], '3(00)',
[174102513, 174102516], '(00)3',
[174284990, 174284993], '(00)3',
[174500513, 174500516], '(00)3',
[175710609, 175710612], '(00)3',
[176870843, 176870846], '3(00)',
[177332732, 177332735], '3(00)',
[177902861, 177902864], '3(00)',
[179979095, 179979098], '(00)3',
[181233726, 181233729], '3(00)',
[181625435, 181625438], '(00)3',
[182105255, 182105259], '22(00)',
[182223559, 182223562], '3(00)',
[191116404, 191116407], '3(00)',
[191165599, 191165602], '3(00)',
[191297535, 191297539], '(00)22',
[192485616, 192485619], '(00)3',
[193264634, 193264638], '22(00)',
[194696968, 194696971], '(00)3',
[195876805, 195876808], '(00)3',
[195916548, 195916551], '3(00)',
[196395160, 196395163], '3(00)',
[196676303, 196676306], '(00)3',
[197889882, 197889885], '3(00)',
[198014122, 198014125], '(00)3',
[199235289, 199235292], '(00)3',
[201007375, 201007378], '(00)3',
[201030605, 201030608], '3(00)',
[201184290, 201184293], '3(00)',
[201685414, 201685418], '(00)22',
[202762875, 202762878], '3(00)',
[202860957, 202860960], '3(00)',
[203832577, 203832580], '3(00)',
[205880544, 205880547], '(00)3',
[206357111, 206357114], '(00)3',
[207159767, 207159770], '3(00)',
[207167343, 207167346], '3(00)',
[207482539, 207482543], '3(010)',
[207669540, 207669543], '3(00)',
[208053426, 208053429], '(00)3',
[208110027, 208110030], '3(00)',
[209513826, 209513829], '3(00)',
[212623522, 212623525], '(00)3',
[213841715, 213841718], '(00)3',
[214012333, 214012336], '(00)3',
[214073567, 214073570], '(00)3',
[215170600, 215170603], '3(00)',
[215881039, 215881042], '3(00)',
[216274604, 216274607], '3(00)',
[216957120, 216957123], '3(00)',
[217323208, 217323211], '(00)3',
[218799264, 218799267], '(00)3',
[218803557, 218803560], '3(00)',
[219735146, 219735149], '(00)3',
[219830062, 219830065], '3(00)',
[219897904, 219897907], '(00)3',
[221205545, 221205548], '(00)3',
[223601929, 223601932], '(00)3',
[223907076, 223907079], '3(00)',
[223970397, 223970400], '(00)3',
[224874044, 224874048], '22(00)',
[225291157, 225291160], '(00)3',
[227481734, 227481737], '(00)3',
[228006442, 228006445], '3(00)',
[228357900, 228357903], '(00)3',
[228386399, 228386402], '(00)3',
[228907446, 228907449], '(00)3',
[228984552, 228984555], '3(00)',
[229140285, 229140288], '3(00)',
[231810024, 231810027], '(00)3',
[232838062, 232838065], '3(00)',
[234389088, 234389091], '3(00)',
[235588194, 235588197], '(00)3',
[236645695, 236645698], '(00)3',
[236962876, 236962879], '3(00)',
[237516723, 237516727], '04(00)',
[240004911, 240004914], '(00)3',
[240221306, 240221309], '3(00)',
[241389213, 241389217], '(010)3',
[241549003, 241549006], '(00)3',
[241729717, 241729720], '(00)3',
[241743684, 241743687], '3(00)',
[243780200, 243780203], '3(00)',
[243801317, 243801320], '(00)3',
[244122072, 244122075], '(00)3',
[244691224, 244691227], '3(00)',
[244841577, 244841580], '(00)3',
[245813461, 245813464], '(00)3',
[246299475, 246299478], '(00)3',
[246450176, 246450179], '3(00)',
[249069349, 249069352], '(00)3',
[250076378, 250076381], '(00)3',
[252442157, 252442160], '3(00)',
[252904231, 252904234], '3(00)',
[255145220, 255145223], '(00)3',
[255285971, 255285974], '3(00)',
[256713230, 256713233], '(00)3',
[257992082, 257992085], '(00)3',
[258447955, 258447959], '22(00)',
[259298045, 259298048], '3(00)',
[262141503, 262141506], '(00)3',
[263681743, 263681746], '3(00)',
[266527881, 266527885], '(010)3',
[266617122, 266617125], '(00)3',
[266628044, 266628047], '3(00)',
[267305763, 267305766], '(00)3',
[267388404, 267388407], '3(00)',
[267441672, 267441675], '3(00)',
[267464886, 267464889], '(00)3',
[267554907, 267554910], '3(00)',
[269787480, 269787483], '(00)3',
[270881434, 270881437], '(00)3',
[270997583, 270997586], '3(00)',
[272096378, 272096381], '3(00)',
[272583009, 272583012], '(00)3',
[274190881, 274190884], '3(00)',
[274268747, 274268750], '(00)3',
[275297429, 275297432], '3(00)',
[275545476, 275545479], '3(00)',
[275898479, 275898482], '3(00)',
[275953000, 275953003], '(00)3',
[277117197, 277117201], '(00)22',
[277447310, 277447313], '3(00)',
[279059657, 279059660], '3(00)',
[279259144, 279259147], '3(00)',
[279513636, 279513639], '3(00)',
[279849069, 279849072], '3(00)',
[280291419, 280291422], '(00)3',
[281449425, 281449428], '3(00)',
[281507953, 281507956], '3(00)',
[281825600, 281825603], '(00)3',
[282547093, 282547096], '3(00)',
[283120963, 283120966], '3(00)',
[283323493, 283323496], '(00)3',
[284764535, 284764538], '3(00)',
[286172639, 286172642], '3(00)',
[286688824, 286688827], '(00)3',
[287222172, 287222175], '3(00)',
[287235534, 287235537], '3(00)',
[287304861, 287304864], '3(00)',
[287433571, 287433574], '(00)3',
[287823551, 287823554], '(00)3',
[287872422, 287872425], '3(00)',
[288766615, 288766618], '3(00)',
[290122963, 290122966], '3(00)',
[290450849, 290450853], '(00)22',
[291426141, 291426144], '3(00)',
[292810353, 292810356], '3(00)',
[293109861, 293109864], '3(00)',
[293398054, 293398057], '3(00)',
[294134426, 294134429], '3(00)',
[294216438, 294216441], '(00)3',
[295367141, 295367144], '3(00)',
[297834111, 297834114], '3(00)',
[299099969, 299099972], '3(00)',
[300746958, 300746961], '3(00)',
[301097423, 301097426], '(00)3',
[301834209, 301834212], '(00)3',
[302554791, 302554794], '(00)3',
[303497445, 303497448], '3(00)',
[304165344, 304165347], '3(00)',
[304790218, 304790222], '3(010)',
[305302352, 305302355], '(00)3',
[306785996, 306785999], '3(00)',
[307051443, 307051446], '3(00)',
[307481539, 307481542], '3(00)',
[308605569, 308605572], '3(00)',
[309237610, 309237613], '3(00)',
[310509287, 310509290], '(00)3',
[310554057, 310554060], '3(00)',
[310646345, 310646348], '3(00)',
[311274896, 311274899], '(00)3',
[311894272, 311894275], '3(00)',
[312269470, 312269473], '(00)3',
[312306601, 312306605], '(00)40',
[312683193, 312683196], '3(00)',
[314499804, 314499807], '3(00)',
[314636802, 314636805], '(00)3',
[314689897, 314689900], '3(00)',
[314721319, 314721322], '3(00)',
[316132890, 316132893], '3(00)',
[316217470, 316217474], '(010)3',
[316465705, 316465708], '3(00)',
[316542790, 316542793], '(00)3',
[320822347, 320822350], '3(00)',
[321733242, 321733245], '3(00)',
[324413970, 324413973], '(00)3',
[325950140, 325950143], '(00)3',
[326675884, 326675887], '(00)3',
[326704208, 326704211], '3(00)',
[327596247, 327596250], '3(00)',
[328123172, 328123175], '3(00)',
[328182212, 328182215], '(00)3',
[328257498, 328257501], '3(00)',
[328315836, 328315839], '(00)3',
[328800974, 328800977], '(00)3',
[328998509, 328998512], '3(00)',
[329725370, 329725373], '(00)3',
[332080601, 332080604], '(00)3',
[332221246, 332221249], '(00)3',
[332299899, 332299902], '(00)3',
[332532822, 332532825], '(00)3',
[333334544, 333334548], '(00)22',
[333881266, 333881269], '3(00)',
[334703267, 334703270], '3(00)',
[334875138, 334875141], '3(00)',
[336531451, 336531454], '3(00)',
[336825907, 336825910], '(00)3',
[336993167, 336993170], '(00)3',
[337493998, 337494001], '3(00)',
[337861034, 337861037], '3(00)',
[337899191, 337899194], '(00)3',
[337958123, 337958126], '(00)3',
[342331982, 342331985], '3(00)',
[342676068, 342676071], '3(00)',
[347063781, 347063784], '3(00)',
[347697348, 347697351], '3(00)',
[347954319, 347954322], '3(00)',
[348162775, 348162778], '3(00)',
[349210702, 349210705], '(00)3',
[349212913, 349212916], '3(00)',
[349248650, 349248653], '(00)3',
[349913500, 349913503], '3(00)',
[350891529, 350891532], '3(00)',
[351089323, 351089326], '3(00)',
[351826158, 351826161], '3(00)',
[352228580, 352228583], '(00)3',
[352376244, 352376247], '3(00)',
[352853758, 352853761], '(00)3',
[355110439, 355110442], '(00)3',
[355808090, 355808094], '(00)40',
[355941556, 355941559], '3(00)',
[356360231, 356360234], '(00)3',
[356586657, 356586660], '3(00)',
[356892926, 356892929], '(00)3',
[356908232, 356908235], '3(00)',
[357912730, 357912733], '3(00)',
[358120344, 358120347], '3(00)',
[359044096, 359044099], '(00)3',
[360819357, 360819360], '3(00)',
[361399662, 361399666], '(010)3',
[362361315, 362361318], '(00)3',
[363610112, 363610115], '(00)3',
[363964804, 363964807], '3(00)',
[364527375, 364527378], '(00)3',
[365090327, 365090330], '(00)3',
[365414539, 365414542], '3(00)',
[366738474, 366738477], '3(00)',
[368714778, 368714783], '04(010)',
[368831545, 368831548], '(00)3',
[368902387, 368902390], '(00)3',
[370109769, 370109772], '3(00)',
[370963333, 370963336], '3(00)',
[372541136, 372541140], '3(010)',
[372681562, 372681565], '(00)3',
[373009410, 373009413], '(00)3',
[373458970, 373458973], '3(00)',
[375648658, 375648661], '3(00)',
[376834728, 376834731], '3(00)',
[377119945, 377119948], '(00)3',
[377335703, 377335706], '(00)3',
[378091745, 378091748], '3(00)',
[379139522, 379139525], '3(00)',
[380279160, 380279163], '(00)3',
[380619442, 380619445], '3(00)',
[381244231, 381244234], '3(00)',
[382327446, 382327450], '(010)3',
[382357073, 382357076], '3(00)',
[383545479, 383545482], '3(00)',
[384363766, 384363769], '(00)3',
[384401786, 384401790], '22(00)',
[385198212, 385198215], '3(00)',
[385824476, 385824479], '(00)3',
[385908194, 385908197], '3(00)',
[386946806, 386946809], '3(00)',
[387592175, 387592179], '22(00)',
[388329293, 388329296], '(00)3',
[388679566, 388679569], '3(00)',
[388832142, 388832145], '3(00)',
[390087103, 390087106], '(00)3',
[390190926, 390190930], '(00)22',
[390331207, 390331210], '3(00)',
[391674495, 391674498], '3(00)',
[391937831, 391937834], '3(00)',
[391951632, 391951636], '(00)22',
[392963986, 392963989], '(00)3',
[393007921, 393007924], '3(00)',
[393373210, 393373213], '3(00)',
[393759572, 393759575], '(00)3',
[394036662, 394036665], '(00)3',
[395813866, 395813869], '(00)3',
[395956690, 395956693], '3(00)',
[396031670, 396031673], '3(00)',
[397076433, 397076436], '3(00)',
[397470601, 397470604], '3(00)',
[398289458, 398289461], '3(00)',
#
[368714778, 368714783], '04(010)',
[437953499, 437953504], '04(010)',
[526196233, 526196238], '032(00)',
[744719566, 744719571], '(010)40',
[750375857, 750375862], '032(00)',
[958241932, 958241937], '04(010)',
[983377342, 983377347], '(00)410',
[1003780080, 1003780085], '04(010)',
[1070232754, 1070232759], '(00)230',
[1209834865, 1209834870], '032(00)',
[1257209100, 1257209105], '(00)410',
[1368002233, 1368002238], '(00)230'
]
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/zetazeros.py |
from .functions import defun, defun_wrapped
def _hermite_param(ctx, n, z, parabolic_cylinder):
"""
Combined calculation of the Hermite polynomial H_n(z) (and its
generalization to complex n) and the parabolic cylinder
function D.
"""
n, ntyp = ctx._convert_param(n)
z = ctx.convert(z)
q = -ctx.mpq_1_2
# For re(z) > 0, 2F0 -- http://functions.wolfram.com/
# HypergeometricFunctions/HermiteHGeneral/06/02/0009/
# Otherwise, there is a reflection formula
# 2F0 + http://functions.wolfram.com/HypergeometricFunctions/
# HermiteHGeneral/16/01/01/0006/
#
# TODO:
# An alternative would be to use
# http://functions.wolfram.com/HypergeometricFunctions/
# HermiteHGeneral/06/02/0006/
#
# Also, the 1F1 expansion
# http://functions.wolfram.com/HypergeometricFunctions/
# HermiteHGeneral/26/01/02/0001/
# should probably be used for tiny z
if not z:
T1 = [2, ctx.pi], [n, 0.5], [], [q*(n-1)], [], [], 0
if parabolic_cylinder:
T1[1][0] += q*n
return T1,
can_use_2f0 = ctx.isnpint(-n) or ctx.re(z) > 0 or \
(ctx.re(z) == 0 and ctx.im(z) > 0)
expprec = ctx.prec*4 + 20
if parabolic_cylinder:
u = ctx.fmul(ctx.fmul(z,z,prec=expprec), -0.25, exact=True)
w = ctx.fmul(z, ctx.sqrt(0.5,prec=expprec), prec=expprec)
else:
w = z
w2 = ctx.fmul(w, w, prec=expprec)
rw2 = ctx.fdiv(1, w2, prec=expprec)
nrw2 = ctx.fneg(rw2, exact=True)
nw = ctx.fneg(w, exact=True)
if can_use_2f0:
T1 = [2, w], [n, n], [], [], [q*n, q*(n-1)], [], nrw2
terms = [T1]
else:
T1 = [2, nw], [n, n], [], [], [q*n, q*(n-1)], [], nrw2
T2 = [2, ctx.pi, nw], [n+2, 0.5, 1], [], [q*n], [q*(n-1)], [1-q], w2
terms = [T1,T2]
# Multiply by prefactor for D_n
if parabolic_cylinder:
expu = ctx.exp(u)
for i in range(len(terms)):
terms[i][1][0] += q*n
terms[i][0].append(expu)
terms[i][1].append(1)
return tuple(terms)
@defun
def hermite(ctx, n, z, **kwargs):
return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 0), [], **kwargs)
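# Illustrative check of the evaluation above (a sketch assuming an installed
# mpmath with its usual top-level exports): for integer n the hypercomb
# representation reduces to the classical physicists' Hermite polynomial,
# e.g. H_3(x) = 8*x**3 - 12*x.
from mpmath import mp, hermite, almosteq
mp.dps = 15
x = mp.mpf('0.7')
assert almosteq(hermite(3, x), 8*x**3 - 12*x)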
@defun
def pcfd(ctx, n, z, **kwargs):
r"""
Gives the parabolic cylinder function in Whittaker's notation
`D_n(z) = U(-n-1/2, z)` (see :func:`~mpmath.pcfu`).
It solves the differential equation
.. math ::
y'' + \left(n + \frac{1}{2} - \frac{1}{4} z^2\right) y = 0.
and can be represented in terms of Hermite polynomials
(see :func:`~mpmath.hermite`) as
.. math ::
D_n(z) = 2^{-n/2} e^{-z^2/4} H_n\left(\frac{z}{\sqrt{2}}\right).
**Plots**
.. literalinclude :: /plots/pcfd.py
.. image :: /plots/pcfd.png
**Examples**
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> pcfd(0,0); pcfd(1,0); pcfd(2,0); pcfd(3,0)
1.0
0.0
-1.0
0.0
>>> pcfd(4,0); pcfd(-3,0)
3.0
0.6266570686577501256039413
>>> pcfd('1/2', 2+3j)
(-5.363331161232920734849056 - 3.858877821790010714163487j)
>>> pcfd(2, -10)
1.374906442631438038871515e-9
Verifying the differential equation::
>>> n = mpf(2.5)
>>> y = lambda z: pcfd(n,z)
>>> z = 1.75
>>> chop(diff(y,z,2) + (n+0.5-0.25*z**2)*y(z))
0.0
Rational Taylor series expansion when `n` is an integer::
>>> taylor(lambda z: pcfd(5,z), 0, 7)
[0.0, 15.0, 0.0, -13.75, 0.0, 3.96875, 0.0, -0.6015625]
"""
return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 1), [], **kwargs)
@defun
def pcfu(ctx, a, z, **kwargs):
r"""
Gives the parabolic cylinder function `U(a,z)`, which may be
defined for `\Re(z) > 0` in terms of the confluent
U-function (see :func:`~mpmath.hyperu`) by
.. math ::
U(a,z) = 2^{-\frac{1}{4}-\frac{a}{2}} e^{-\frac{1}{4} z^2}
U\left(\frac{a}{2}+\frac{1}{4},
\frac{1}{2}, \frac{1}{2}z^2\right)
or, for arbitrary `z`,
.. math ::
e^{-\frac{1}{4}z^2} U(a,z) =
U(a,0) \,_1F_1\left(-\tfrac{a}{2}+\tfrac{1}{4};
\tfrac{1}{2}; -\tfrac{1}{2}z^2\right) +
U'(a,0) z \,_1F_1\left(-\tfrac{a}{2}+\tfrac{3}{4};
\tfrac{3}{2}; -\tfrac{1}{2}z^2\right).
**Examples**
Connection to other functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> z = mpf(3)
>>> pcfu(0.5,z)
0.03210358129311151450551963
>>> sqrt(pi/2)*exp(z**2/4)*erfc(z/sqrt(2))
0.03210358129311151450551963
>>> pcfu(0.5,-z)
23.75012332835297233711255
>>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2))
23.75012332835297233711255
"""
n, _ = ctx._convert_param(a)
return ctx.pcfd(-n-ctx.mpq_1_2, z)
@defun
def pcfv(ctx, a, z, **kwargs):
r"""
Gives the parabolic cylinder function `V(a,z)`, which can be
represented in terms of :func:`~mpmath.pcfu` as
.. math ::
        V(a,z) = \frac{\Gamma(a+\tfrac{1}{2}) \left(\sin(\pi a)\, U(a,z) + U(a,-z)\right)}{\pi}.
**Examples**
Wronskian relation between `U` and `V`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, z = 2, 3
>>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
0.7978845608028653558798921
>>> sqrt(2/pi)
0.7978845608028653558798921
>>> a, z = 2.5, 3
>>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
0.7978845608028653558798921
>>> a, z = 0.25, -1
>>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
0.7978845608028653558798921
>>> a, z = 2+1j, 2+3j
>>> chop(pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z))
0.7978845608028653558798921
"""
n, ntype = ctx._convert_param(a)
z = ctx.convert(z)
q = ctx.mpq_1_2
r = ctx.mpq_1_4
if ntype == 'Q' and ctx.isint(n*2):
# Faster for half-integers
def h():
jz = ctx.fmul(z, -1j, exact=True)
T1terms = _hermite_param(ctx, -n-q, z, 1)
T2terms = _hermite_param(ctx, n-q, jz, 1)
for T in T1terms:
T[0].append(1j)
T[1].append(1)
T[3].append(q-n)
u = ctx.expjpi((q*n-r)) * ctx.sqrt(2/ctx.pi)
for T in T2terms:
T[0].append(u)
T[1].append(1)
return T1terms + T2terms
v = ctx.hypercomb(h, [], **kwargs)
if ctx._is_real_type(n) and ctx._is_real_type(z):
v = ctx._re(v)
return v
else:
def h(n):
w = ctx.square_exp_arg(z, -0.25)
u = ctx.square_exp_arg(z, 0.5)
e = ctx.exp(w)
            l = [ctx.pi, q, e]
Y1 = l, [-q, n*q+r, 1], [r-q*n], [], [q*n+r], [q], u
Y2 = l + [z], [-q, n*q-r, 1, 1], [1-r-q*n], [], [q*n+1-r], [1+q], u
c, s = ctx.cospi_sinpi(r+q*n)
Y1[0].append(s)
Y2[0].append(c)
for Y in (Y1, Y2):
Y[1].append(1)
Y[3].append(q-n)
return Y1, Y2
return ctx.hypercomb(h, [n], **kwargs)
@defun
def pcfw(ctx, a, z, **kwargs):
r"""
Gives the parabolic cylinder function `W(a,z)` defined in (DLMF 12.14).
**Examples**
Value at the origin::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a = mpf(0.25)
>>> pcfw(a,0)
0.9722833245718180765617104
>>> power(2,-0.75)*sqrt(abs(gamma(0.25+0.5j*a)/gamma(0.75+0.5j*a)))
0.9722833245718180765617104
>>> diff(pcfw,(a,0),(0,1))
-0.5142533944210078966003624
>>> -power(2,-0.25)*sqrt(abs(gamma(0.75+0.5j*a)/gamma(0.25+0.5j*a)))
-0.5142533944210078966003624
"""
n, _ = ctx._convert_param(a)
z = ctx.convert(z)
def terms():
        phi2 = (ctx.loggamma(0.5+ctx.j*n) - ctx.loggamma(0.5-ctx.j*n))/2j
rho = ctx.pi/8 + 0.5*phi2
# XXX: cancellation computing k
k = ctx.sqrt(1 + ctx.exp(2*ctx.pi*n)) - ctx.exp(ctx.pi*n)
C = ctx.sqrt(k/2) * ctx.exp(0.25*ctx.pi*n)
yield C * ctx.expj(rho) * ctx.pcfu(ctx.j*n, z*ctx.expjpi(-0.25))
yield C * ctx.expj(-rho) * ctx.pcfu(-ctx.j*n, z*ctx.expjpi(0.25))
v = ctx.sum_accurately(terms)
if ctx._is_real_type(n) and ctx._is_real_type(z):
v = ctx._re(v)
return v
"""
Even/odd PCFs. Useful?
@defun
def pcfy1(ctx, a, z, **kwargs):
    a, _ = ctx._convert_param(a)
z = ctx.convert(z)
def h():
w = ctx.square_exp_arg(z)
w1 = ctx.fmul(w, -0.25, exact=True)
w2 = ctx.fmul(w, 0.5, exact=True)
e = ctx.exp(w1)
return [e], [1], [], [], [ctx.mpq_1_2*a+ctx.mpq_1_4], [ctx.mpq_1_2], w2
return ctx.hypercomb(h, [], **kwargs)
@defun
def pcfy2(ctx, a, z, **kwargs):
    a, _ = ctx._convert_param(a)
z = ctx.convert(z)
def h():
w = ctx.square_exp_arg(z)
w1 = ctx.fmul(w, -0.25, exact=True)
w2 = ctx.fmul(w, 0.5, exact=True)
e = ctx.exp(w1)
return [e, z], [1, 1], [], [], [ctx.mpq_1_2*a+ctx.mpq_3_4], \
[ctx.mpq_3_2], w2
return ctx.hypercomb(h, [], **kwargs)
"""
@defun_wrapped
def gegenbauer(ctx, n, a, z, **kwargs):
# Special cases: a+0.5, a*2 poles
if ctx.isnpint(a):
return 0*(z+n)
if ctx.isnpint(a+0.5):
# TODO: something else is required here
# E.g.: gegenbauer(-2, -0.5, 3) == -12
if ctx.isnpint(n+1):
raise NotImplementedError("Gegenbauer function with two limits")
def h(a):
a2 = 2*a
T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z)
return [T]
return ctx.hypercomb(h, [a], **kwargs)
def h(n):
a2 = 2*a
T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z)
return [T]
return ctx.hypercomb(h, [n], **kwargs)
@defun_wrapped
def jacobi(ctx, n, a, b, x, **kwargs):
if not ctx.isnpint(a):
def h(n):
return (([], [], [a+n+1], [n+1, a+1], [-n, a+b+n+1], [a+1], (1-x)*0.5),)
return ctx.hypercomb(h, [n], **kwargs)
if not ctx.isint(b):
def h(n, a):
return (([], [], [-b], [n+1, -b-n], [-n, a+b+n+1], [b+1], (x+1)*0.5),)
return ctx.hypercomb(h, [n, a], **kwargs)
# XXX: determine appropriate limit
return ctx.binomial(n+a,n) * ctx.hyp2f1(-n,1+n+a+b,a+1,(1-x)/2, **kwargs)
@defun_wrapped
def laguerre(ctx, n, a, z, **kwargs):
# XXX: limits, poles
#if ctx.isnpint(n):
# return 0*(a+z)
def h(a):
return (([], [], [a+n+1], [a+1, n+1], [-n], [a+1], z),)
return ctx.hypercomb(h, [a], **kwargs)
@defun_wrapped
def legendre(ctx, n, x, **kwargs):
if ctx.isint(n):
n = int(n)
# Accuracy near zeros
if (n + (n < 0)) & 1:
if not x:
return x
mag = ctx.mag(x)
if mag < -2*ctx.prec-10:
return x
if mag < -5:
ctx.prec += -mag
return ctx.hyp2f1(-n,n+1,1,(1-x)/2, **kwargs)
@defun
def legenp(ctx, n, m, z, type=2, **kwargs):
# Legendre function, 1st kind
n = ctx.convert(n)
m = ctx.convert(m)
# Faster
if not m:
return ctx.legendre(n, z, **kwargs)
# TODO: correct evaluation at singularities
if type == 2:
def h(n,m):
g = m*0.5
T = [1+z, 1-z], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z)
return (T,)
return ctx.hypercomb(h, [n,m], **kwargs)
if type == 3:
def h(n,m):
g = m*0.5
T = [z+1, z-1], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z)
return (T,)
return ctx.hypercomb(h, [n,m], **kwargs)
raise ValueError("requires type=2 or type=3")
@defun
def legenq(ctx, n, m, z, type=2, **kwargs):
# Legendre function, 2nd kind
n = ctx.convert(n)
m = ctx.convert(m)
z = ctx.convert(z)
if z in (1, -1):
#if ctx.isint(m):
# return ctx.nan
#return ctx.inf # unsigned
return ctx.nan
if type == 2:
def h(n, m):
cos, sin = ctx.cospi_sinpi(m)
s = 2 * sin / ctx.pi
c = cos
a = 1+z
b = 1-z
u = m/2
w = (1-z)/2
T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \
[-n, n+1], [1-m], w
T2 = [-s, a, b], [-1, -u, u], [n+m+1], [n-m+1, m+1], \
[-n, n+1], [m+1], w
return T1, T2
return ctx.hypercomb(h, [n, m], **kwargs)
if type == 3:
        # The following is faster when there is only a single series
# Note: not valid for -1 < z < 0 (?)
if abs(z) > 1:
def h(n, m):
T1 = [ctx.expjpi(m), 2, ctx.pi, z, z-1, z+1], \
[1, -n-1, 0.5, -n-m-1, 0.5*m, 0.5*m], \
[n+m+1], [n+1.5], \
[0.5*(2+n+m), 0.5*(1+n+m)], [n+1.5], z**(-2)
return [T1]
return ctx.hypercomb(h, [n, m], **kwargs)
else:
# not valid for 1 < z < inf ?
def h(n, m):
s = 2 * ctx.sinpi(m) / ctx.pi
c = ctx.expjpi(m)
a = 1+z
b = z-1
u = m/2
w = (1-z)/2
T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \
[-n, n+1], [1-m], w
T2 = [-s, c, a, b], [-1, 1, -u, u], [n+m+1], [n-m+1, m+1], \
[-n, n+1], [m+1], w
return T1, T2
return ctx.hypercomb(h, [n, m], **kwargs)
raise ValueError("requires type=2 or type=3")
@defun_wrapped
def chebyt(ctx, n, x, **kwargs):
if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1:
return x * 0
return ctx.hyp2f1(-n,n,(1,2),(1-x)/2, **kwargs)
@defun_wrapped
def chebyu(ctx, n, x, **kwargs):
if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1:
return x * 0
return (n+1) * ctx.hyp2f1(-n, n+2, (3,2), (1-x)/2, **kwargs)
@defun
def spherharm(ctx, l, m, theta, phi, **kwargs):
l = ctx.convert(l)
m = ctx.convert(m)
theta = ctx.convert(theta)
phi = ctx.convert(phi)
l_isint = ctx.isint(l)
l_natural = l_isint and l >= 0
m_isint = ctx.isint(m)
if l_isint and l < 0 and m_isint:
return ctx.spherharm(-(l+1), m, theta, phi, **kwargs)
if theta == 0 and m_isint and m < 0:
return ctx.zero * 1j
if l_natural and m_isint:
if abs(m) > l:
return ctx.zero * 1j
# http://functions.wolfram.com/Polynomials/
# SphericalHarmonicY/26/01/02/0004/
def h(l,m):
absm = abs(m)
C = [-1, ctx.expj(m*phi),
(2*l+1)*ctx.fac(l+absm)/ctx.pi/ctx.fac(l-absm),
ctx.sin(theta)**2,
ctx.fac(absm), 2]
P = [0.5*m*(ctx.sign(m)+1), 1, 0.5, 0.5*absm, -1, -absm-1]
return ((C, P, [], [], [absm-l, l+absm+1], [absm+1],
ctx.sin(0.5*theta)**2),)
else:
# http://functions.wolfram.com/HypergeometricFunctions/
# SphericalHarmonicYGeneral/26/01/02/0001/
def h(l,m):
if ctx.isnpint(l-m+1) or ctx.isnpint(l+m+1) or ctx.isnpint(1-m):
return (([0], [-1], [], [], [], [], 0),)
cos, sin = ctx.cos_sin(0.5*theta)
C = [0.5*ctx.expj(m*phi), (2*l+1)/ctx.pi,
ctx.gamma(l-m+1), ctx.gamma(l+m+1),
cos**2, sin**2]
P = [1, 0.5, 0.5, -0.5, 0.5*m, -0.5*m]
return ((C, P, [], [1-m], [-l,l+1], [1-m], sin**2),)
return ctx.hypercomb(h, [l,m], **kwargs)
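# Normalization sketch (assumes mpmath's usual top-level exports): Y_0^0 is
# the constant 1/(2*sqrt(pi)) for any angles; the value may carry a zero
# imaginary part from the complex prefactor, hence the re().
from mpmath import mp, spherharm, sqrt, pi, re, almosteq
assert almosteq(re(spherharm(0, 0, 0.5, 1.2)), 1/(2*sqrt(pi)))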
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/orthogonal.py |
from .functions import defun, defun_wrapped
@defun
def j0(ctx, x):
"""Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`."""
return ctx.besselj(0, x)
@defun
def j1(ctx, x):
"""Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`."""
return ctx.besselj(1, x)
@defun
def besselj(ctx, n, z, derivative=0, **kwargs):
if type(n) is int:
n_isint = True
else:
n = ctx.convert(n)
n_isint = ctx.isint(n)
if n_isint:
n = int(ctx._re(n))
if n_isint and n < 0:
return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs)
z = ctx.convert(z)
M = ctx.mag(z)
if derivative:
d = ctx.convert(derivative)
# TODO: the integer special-casing shouldn't be necessary.
# However, the hypergeometric series gets inaccurate for large d
# because of inaccurate pole cancellation at a pole far from
# zero (needs to be fixed in hypercomb or hypsum)
if ctx.isint(d) and d >= 0:
d = int(d)
orig = ctx.prec
try:
ctx.prec += 15
v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z)
for k in range(d+1))
finally:
ctx.prec = orig
v *= ctx.mpf(2)**(-d)
else:
def h(n,d):
r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True)
B = [0.5*(n-d+1), 0.5*(n-d+2)]
T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)]
return T
v = ctx.hypercomb(h, [n,d], **kwargs)
else:
# Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation
if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20:
try:
return ctx._besselj(n, z)
except NotImplementedError:
pass
if not z:
if not n:
v = ctx.one + n+z
elif ctx.re(n) > 0:
v = n*z
else:
v = ctx.inf + z + n
else:
#v = 0
orig = ctx.prec
try:
# XXX: workaround for accuracy in low level hypergeometric series
# when alternating, large arguments
ctx.prec += min(3*abs(M), ctx.prec)
w = ctx.fmul(z, 0.5, exact=True)
def h(n):
r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True)
return [([w], [n], [], [n+1], [], [n+1], r)]
v = ctx.hypercomb(h, [n], **kwargs)
finally:
ctx.prec = orig
v = +v
return v
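# Derivative sanity sketch (assumes mpmath's usual top-level exports): the
# derivative branch above computes d/dz J_n(z), so for n = 0 it must agree
# with the classical identity J_0'(z) = -J_1(z).
from mpmath import mp, besselj, almosteq
z = mp.mpf('3.7')
assert almosteq(besselj(0, z, derivative=1), -besselj(1, z))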
@defun
def besseli(ctx, n, z, derivative=0, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
if not z:
if derivative:
raise ValueError
if not n:
# I(0,0) = 1
return 1+n+z
if ctx.isint(n):
return 0*(n+z)
r = ctx.re(n)
if r == 0:
return ctx.nan*(n+z)
elif r > 0:
return 0*(n+z)
else:
return ctx.inf+(n+z)
M = ctx.mag(z)
if derivative:
d = ctx.convert(derivative)
def h(n,d):
r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True)
B = [0.5*(n-d+1), 0.5*(n-d+2), n+1]
T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)]
return T
v = ctx.hypercomb(h, [n,d], **kwargs)
else:
def h(n):
w = ctx.fmul(z, 0.5, exact=True)
r = ctx.fmul(w, w, prec=max(0,ctx.prec+M))
return [([w], [n], [], [n+1], [], [n+1], r)]
v = ctx.hypercomb(h, [n], **kwargs)
return v
@defun_wrapped
def bessely(ctx, n, z, derivative=0, **kwargs):
if not z:
if derivative:
# Not implemented
raise ValueError
if not n:
# ~ log(z/2)
return -ctx.inf + (n+z)
if ctx.im(n):
            return ctx.nan * (n+z)
r = ctx.re(n)
q = n+0.5
if ctx.isint(q):
if n > 0:
return -ctx.inf + (n+z)
else:
return 0 * (n+z)
if r < 0 and int(ctx.floor(q)) % 2:
return ctx.inf + (n+z)
else:
return ctx.ninf + (n+z)
# XXX: use hypercomb
ctx.prec += 10
m, d = ctx.nint_distance(n)
if d < -ctx.prec:
h = +ctx.eps
ctx.prec *= 2
n += h
elif d < 0:
ctx.prec -= d
# TODO: avoid cancellation for imaginary arguments
cos, sin = ctx.cospi_sinpi(n)
return (ctx.besselj(n,z,derivative,**kwargs)*cos - \
ctx.besselj(-n,z,derivative,**kwargs))/sin
@defun_wrapped
def besselk(ctx, n, z, **kwargs):
if not z:
return ctx.inf
M = ctx.mag(z)
if M < 1:
# Represent as limit definition
def h(n):
r = (z/2)**2
T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r
T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r
return T1, T2
# We could use the limit definition always, but it leads
# to very bad cancellation (of exponentially large terms)
# for large real z
# Instead represent in terms of 2F0
else:
ctx.prec += M
def h(n):
return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \
[n+0.5, 0.5-n], [], -1/(2*z))]
return ctx.hypercomb(h, [n], **kwargs)
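# Closed-form spot check for the code above (assumes mpmath's usual top-level
# exports): half-integer orders are elementary, K_{1/2}(z) = sqrt(pi/(2*z))*exp(-z).
from mpmath import mp, besselk, sqrt, pi, exp, almosteq
z = mp.mpf('2.0')
assert almosteq(besselk(0.5, z), sqrt(pi/(2*z))*exp(-z))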
@defun_wrapped
def hankel1(ctx,n,x,**kwargs):
return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs)
@defun_wrapped
def hankel2(ctx,n,x,**kwargs):
return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs)
@defun_wrapped
def whitm(ctx,k,m,z,**kwargs):
if z == 0:
# M(k,m,z) = 0^(1/2+m)
if ctx.re(m) > -0.5:
return z
elif ctx.re(m) < -0.5:
return ctx.inf + z
else:
return ctx.nan * z
x = ctx.fmul(-0.5, z, exact=True)
y = 0.5+m
return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs)
@defun_wrapped
def whitw(ctx,k,m,z,**kwargs):
if z == 0:
g = abs(ctx.re(m))
if g < 0.5:
return z
elif g > 0.5:
return ctx.inf + z
else:
return ctx.nan * z
x = ctx.fmul(-0.5, z, exact=True)
y = 0.5+m
return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs)
@defun
def hyperu(ctx, a, b, z, **kwargs):
a, atype = ctx._convert_param(a)
b, btype = ctx._convert_param(b)
z = ctx.convert(z)
if not z:
if ctx.re(b) <= 1:
return ctx.gammaprod([1-b],[a-b+1])
else:
return ctx.inf + z
bb = 1+a-b
bb, bbtype = ctx._convert_param(bb)
try:
orig = ctx.prec
try:
ctx.prec += 10
v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec)
return v / z**a
finally:
ctx.prec = orig
except ctx.NoConvergence:
pass
def h(a,b):
w = ctx.sinpi(b)
T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z)
T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z)
return T1, T2
return ctx.hypercomb(h, [a,b], **kwargs)
@defun
def struveh(ctx,n,z, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
# http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/
def h(n):
return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)]
return ctx.hypercomb(h, [n], **kwargs)
@defun
def struvel(ctx,n,z, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
# http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/
def h(n):
return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)]
return ctx.hypercomb(h, [n], **kwargs)
def _anger(ctx,which,v,z,**kwargs):
v = ctx._convert_param(v)[0]
z = ctx.convert(z)
def h(v):
b = ctx.mpq_1_2
u = v*b
m = b*3
a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u
c, s = ctx.cospi_sinpi(u)
if which == 0:
A, B = [b*z, s], [c]
if which == 1:
A, B = [b*z, -c], [s]
w = ctx.square_exp_arg(z, mult=-0.25)
T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w
T2 = B, [1], [], [b1,b2], [1], [b1,b2], w
return T1, T2
return ctx.hypercomb(h, [v], **kwargs)
@defun
def angerj(ctx, v, z, **kwargs):
return _anger(ctx, 0, v, z, **kwargs)
@defun
def webere(ctx, v, z, **kwargs):
return _anger(ctx, 1, v, z, **kwargs)
@defun
def lommels1(ctx, u, v, z, **kwargs):
u = ctx._convert_param(u)[0]
v = ctx._convert_param(v)[0]
z = ctx.convert(z)
def h(u,v):
b = ctx.mpq_1_2
w = ctx.square_exp_arg(z, mult=-0.25)
return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \
[b*(u-v+3),b*(u+v+3)], w),
return ctx.hypercomb(h, [u,v], **kwargs)
@defun
def lommels2(ctx, u, v, z, **kwargs):
u = ctx._convert_param(u)[0]
v = ctx._convert_param(v)[0]
z = ctx.convert(z)
# Asymptotic expansion (GR p. 947) -- need to be careful
# not to use for small arguments
# def h(u,v):
# b = ctx.mpq_1_2
# w = -(z/2)**(-2)
# return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w),
def h(u,v):
b = ctx.mpq_1_2
w = ctx.square_exp_arg(z, mult=-0.25)
T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w
T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w
T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w
#c1 = ctx.cospi((u-v)*b)
#c2 = ctx.cospi((u+v)*b)
#s = ctx.sinpi(v)
#r1 = (u-v+1)*b
#r2 = (u+v+1)*b
#T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w
#T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w
#T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w
#T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w
return T1, T2, T3
return ctx.hypercomb(h, [u,v], **kwargs)
@defun
def ber(ctx, n, z, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/
def h(n):
r = -(z/4)**4
cos, sin = ctx.cospi_sinpi(-0.75*n)
T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
return T1, T2
return ctx.hypercomb(h, [n], **kwargs)
@defun
def bei(ctx, n, z, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/
def h(n):
r = -(z/4)**4
cos, sin = ctx.cospi_sinpi(0.75*n)
T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
return T1, T2
return ctx.hypercomb(h, [n], **kwargs)
@defun
def ker(ctx, n, z, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/
def h(n):
r = -(z/4)**4
cos1, sin1 = ctx.cospi_sinpi(0.25*n)
cos2, sin2 = ctx.cospi_sinpi(0.75*n)
T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r
T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r
T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
return T1, T2, T3, T4
return ctx.hypercomb(h, [n], **kwargs)
@defun
def kei(ctx, n, z, **kwargs):
n = ctx.convert(n)
z = ctx.convert(z)
# http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/
def h(n):
r = -(z/4)**4
cos1, sin1 = ctx.cospi_sinpi(0.75*n)
cos2, sin2 = ctx.cospi_sinpi(0.25*n)
T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r
T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r
return T1, T2, T3, T4
return ctx.hypercomb(h, [n], **kwargs)
# TODO: do this more generically?
def c_memo(f):
name = f.__name__
def f_wrapped(ctx):
cache = ctx._misc_const_cache
prec = ctx.prec
p,v = cache.get(name, (-1,0))
if p >= prec:
return +v
else:
cache[name] = (prec, f(ctx))
return cache[name][1]
return f_wrapped
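# Usage sketch for the memo decorator above; the decorated constant here is
# hypothetical, purely for illustration. A wrapped constant is recomputed
# only when the working precision exceeds the precision it was cached at:
#
#     @c_memo
#     def _example_const(ctx):
#         return ctx.sqrt(2)
#
#     _example_const(mp)     # computes and caches at the current mp.prec
#     _example_const(mp)     # served from ctx._misc_const_cache
#     mp.prec *= 2
#     _example_const(mp)     # cache is stale, recomputed at higher precision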
@c_memo
def _airyai_C1(ctx):
return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3))
@c_memo
def _airyai_C2(ctx):
return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3))
@c_memo
def _airybi_C1(ctx):
return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3))
@c_memo
def _airybi_C2(ctx):
return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3)
def _airybi_n2_inf(ctx):
prec = ctx.prec
try:
v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi)
finally:
ctx.prec = prec
return +v
# Derivatives at z = 0
# TODO: could be expressed more elegantly using triple factorials
def _airyderiv_0(ctx, z, n, ntype, which):
if ntype == 'Z':
if n < 0:
return z
r = ctx.mpq_1_3
prec = ctx.prec
try:
ctx.prec += 10
v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi
if which == 0:
v *= ctx.sinpi(2*(n+1)*r)
v /= ctx.power(3,'2/3')
else:
v *= abs(ctx.sinpi(2*(n+1)*r))
v /= ctx.power(3,'1/6')
finally:
ctx.prec = prec
return +v + z
else:
# singular (does the limit exist?)
raise NotImplementedError
@defun
def airyai(ctx, z, derivative=0, **kwargs):
z = ctx.convert(z)
if derivative:
n, ntype = ctx._convert_param(derivative)
else:
n = 0
# Values at infinities
if not ctx.isnormal(z) and z:
if n and ntype == 'Z':
if n == -1:
if z == ctx.inf:
return ctx.mpf(1)/3 + 1/z
if z == ctx.ninf:
return ctx.mpf(-2)/3 + 1/z
if n < -1:
if z == ctx.inf:
return z
if z == ctx.ninf:
return (-1)**n * (-z)
if (not n) and z == ctx.inf or z == ctx.ninf:
return 1/z
# TODO: limits
raise ValueError("essential singularity of Ai(z)")
# Account for exponential scaling
if z:
extraprec = max(0, int(1.5*ctx.mag(z)))
else:
extraprec = 0
if n:
if n == 1:
def h():
# http://functions.wolfram.com/03.07.06.0005.01
if ctx._re(z) > 4:
ctx.prec += extraprec
w = z**1.5; r = -0.75/w; u = -2*w/3
ctx.prec -= extraprec
C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4)
return ([C],[1],[],[],[(-1,6),(7,6)],[],r),
# http://functions.wolfram.com/03.07.26.0001.01
else:
ctx.prec += extraprec
w = z**3 / 9
ctx.prec -= extraprec
C1 = _airyai_C1(ctx) * 0.5
C2 = _airyai_C2(ctx)
T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
return T1, T2
return ctx.hypercomb(h, [], **kwargs)
else:
if z == 0:
return _airyderiv_0(ctx, z, n, ntype, 0)
# http://functions.wolfram.com/03.05.20.0004.01
def h(n):
ctx.prec += extraprec
w = z**3/9
ctx.prec -= extraprec
q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \
[a1,a2], [b1,b2,b3], w
a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \
[a1,a2], [b1,b2,b3], w
return T1, T2
v = ctx.hypercomb(h, [n], **kwargs)
if ctx._is_real_type(z) and ctx.isint(n):
v = ctx._re(v)
return v
else:
def h():
if ctx._re(z) > 4:
# We could use 1F1, but it results in huge cancellation;
# the following expansion is better.
# TODO: asymptotic series for derivatives
ctx.prec += extraprec
w = z**1.5; r = -0.75/w; u = -2*w/3
ctx.prec -= extraprec
C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4))
return ([C],[1],[],[],[(1,6),(5,6)],[],r),
else:
ctx.prec += extraprec
w = z**3 / 9
ctx.prec -= extraprec
C1 = _airyai_C1(ctx)
C2 = _airyai_C2(ctx)
T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
return T1, T2
return ctx.hypercomb(h, [], **kwargs)
@defun
def airybi(ctx, z, derivative=0, **kwargs):
z = ctx.convert(z)
if derivative:
n, ntype = ctx._convert_param(derivative)
else:
n = 0
# Values at infinities
if not ctx.isnormal(z) and z:
if n and ntype == 'Z':
if z == ctx.inf:
return z
if z == ctx.ninf:
if n == -1:
return 1/z
if n == -2:
return _airybi_n2_inf(ctx)
if n < -2:
return (-1)**n * (-z)
if not n:
if z == ctx.inf:
return z
if z == ctx.ninf:
return 1/z
# TODO: limits
raise ValueError("essential singularity of Bi(z)")
if z:
extraprec = max(0, int(1.5*ctx.mag(z)))
else:
extraprec = 0
if n:
if n == 1:
# http://functions.wolfram.com/03.08.26.0001.01
def h():
ctx.prec += extraprec
w = z**3 / 9
ctx.prec -= extraprec
C1 = _airybi_C1(ctx)*0.5
C2 = _airybi_C2(ctx)
T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
return T1, T2
return ctx.hypercomb(h, [], **kwargs)
else:
if z == 0:
return _airyderiv_0(ctx, z, n, ntype, 1)
def h(n):
ctx.prec += extraprec
w = z**3/9
ctx.prec -= extraprec
q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
q16 = ctx.mpq_1_6
q56 = ctx.mpq_5_6
a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \
[a1,a2], [b1,b2,b3], w
a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \
[a1,a2], [b1,b2,b3], w
return T1, T2
v = ctx.hypercomb(h, [n], **kwargs)
if ctx._is_real_type(z) and ctx.isint(n):
v = ctx._re(v)
return v
else:
def h():
ctx.prec += extraprec
w = z**3 / 9
ctx.prec -= extraprec
C1 = _airybi_C1(ctx)
C2 = _airybi_C2(ctx)
T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
return T1, T2
return ctx.hypercomb(h, [], **kwargs)
def _airy_zero(ctx, which, k, derivative, complex=False):
# Asymptotic formulas are given in DLMF section 9.9
def U(t): return t**(2/3.)*(1-7/(t**2*48))
def T(t): return t**(2/3.)*(1+5/(t**2*48))
k = int(k)
assert k >= 1
assert derivative in (0,1)
if which == 0:
if derivative:
return ctx.findroot(lambda z: ctx.airyai(z,1),
-U(3*ctx.pi*(4*k-3)/8))
return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8))
if which == 1 and complex == False:
if derivative:
return ctx.findroot(lambda z: ctx.airybi(z,1),
-U(3*ctx.pi*(4*k-1)/8))
return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8))
if which == 1 and complex == True:
if derivative:
t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2
s = ctx.expjpi(ctx.mpf(1)/3) * T(t)
return ctx.findroot(lambda z: ctx.airybi(z,1), s)
t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2
s = ctx.expjpi(ctx.mpf(1)/3) * U(t)
return ctx.findroot(ctx.airybi, s)
@defun
def airyaizero(ctx, k, derivative=0):
return _airy_zero(ctx, 0, k, derivative, False)
@defun
def airybizero(ctx, k, derivative=0, complex=False):
return _airy_zero(ctx, 1, k, derivative, complex)
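# Spot check sketch (assumes mpmath's usual top-level exports): the first
# zero of Ai is near -2.33811, and the refined root should make airyai vanish
# to roughly working precision.
from mpmath import mp, airyaizero, airyai
a1 = airyaizero(1)                  # approximately -2.33810741045977
assert abs(airyai(a1)) < 1000*mp.eps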
def _scorer(ctx, z, which, kwargs):
z = ctx.convert(z)
if ctx.isinf(z):
if z == ctx.inf:
if which == 0: return 1/z
if which == 1: return z
if z == ctx.ninf:
return 1/z
raise ValueError("essential singularity")
if z:
extraprec = max(0, int(1.5*ctx.mag(z)))
else:
extraprec = 0
if kwargs.get('derivative'):
raise NotImplementedError
# Direct asymptotic expansions, to avoid
# exponentially large cancellation
try:
if ctx.mag(z) > 3:
if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999:
def h():
return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999:
def h():
return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
except ctx.NoConvergence:
pass
def h():
A = ctx.airybi(z, **kwargs)/3
B = -2*ctx.pi
if which == 1:
A *= 2
B *= -1
ctx.prec += extraprec
w = z**3/9
ctx.prec -= extraprec
T1 = [A], [1], [], [], [], [], 0
T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w
return T1, T2
return ctx.hypercomb(h, [], **kwargs)
@defun
def scorergi(ctx, z, **kwargs):
return _scorer(ctx, z, 0, kwargs)
@defun
def scorerhi(ctx, z, **kwargs):
return _scorer(ctx, z, 1, kwargs)
@defun_wrapped
def coulombc(ctx, l, eta, _cache={}):
if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
return +_cache[l,eta][1]
G3 = ctx.loggamma(2*l+2)
G1 = ctx.loggamma(1+l+ctx.j*eta)
G2 = ctx.loggamma(1+l-ctx.j*eta)
v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3)
if not (ctx.im(l) or ctx.im(eta)):
v = ctx.re(v)
_cache[l,eta] = (ctx.prec, v)
return v
@defun_wrapped
def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs):
# Regular Coulomb wave function
# Note: w can be either 1 or -1; the other may be better in some cases
# TODO: check that chop=True chops when and only when it should
#ctx.prec += 10
def h(l, eta):
try:
jw = ctx.j*w
jwz = ctx.fmul(jw, z, exact=True)
jwz2 = ctx.fmul(jwz, -2, exact=True)
C = ctx.coulombc(l, eta)
T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \
[2*l+2], jwz2
except ValueError:
T1 = [0], [-1], [], [], [], [], 0
return (T1,)
v = ctx.hypercomb(h, [l,eta], **kwargs)
if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \
(ctx.re(z) >= 0):
v = ctx.re(v)
return v
@defun_wrapped
def _coulomb_chi(ctx, l, eta, _cache={}):
if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
return _cache[l,eta][1]
def terms():
l2 = -l-1
jeta = ctx.j*eta
return [ctx.loggamma(1+l+jeta) * (-0.5j),
ctx.loggamma(1+l-jeta) * (0.5j),
ctx.loggamma(1+l2+jeta) * (0.5j),
ctx.loggamma(1+l2-jeta) * (-0.5j),
-(l+0.5)*ctx.pi]
v = ctx.sum_accurately(terms, 1)
_cache[l,eta] = (ctx.prec, v)
return v
@defun_wrapped
def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs):
# Irregular Coulomb wave function
# Note: w can be either 1 or -1; the other may be better in some cases
# TODO: check that chop=True chops when and only when it should
if not ctx._im(l):
l = ctx._re(l) # XXX: for isint
def h(l, eta):
# Force perturbation for integers and half-integers
if ctx.isint(l*2):
T1 = [0], [-1], [], [], [], [], 0
return (T1,)
l2 = -l-1
try:
chi = ctx._coulomb_chi(l, eta)
jw = ctx.j*w
s = ctx.sin(chi); c = ctx.cos(chi)
C1 = ctx.coulombc(l,eta)
C2 = ctx.coulombc(l2,eta)
u = ctx.exp(jw*z)
x = -2*jw*z
T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \
[1+l+jw*eta], [2*l+2], x
T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \
[1+l2+jw*eta], [2*l2+2], x
return T1, T2
except ValueError:
T1 = [0], [-1], [], [], [], [], 0
return (T1,)
v = ctx.hypercomb(h, [l,eta], **kwargs)
if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \
(ctx._re(z) >= 0):
v = ctx._re(v)
return v
def mcmahon(ctx,kind,prime,v,m):
"""
Computes an estimate for the location of the Bessel function zero
j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic
    expansion (Abramowitz & Stegun 9.5.12-13, DLMF 10.21(vi)).
Returns (r,err) where r is the estimated location of the root
and err is a positive number estimating the error of the
asymptotic expansion.
"""
u = 4*v**2
if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4
if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4
if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4
if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4
if not prime:
s1 = b
s2 = -(u-1)/(8*b)
s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3)
s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5)
s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7)
if prime:
s1 = b
s2 = -(u+3)/(8*b)
s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3)
s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5)
s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7)
terms = [s1,s2,s3,s4,s5]
s = s1
err = 0.0
for i in range(1,len(terms)):
if abs(terms[i]) < abs(terms[i-1]):
s += terms[i]
else:
err = abs(terms[i])
if i == len(terms)-1:
err = abs(terms[-1])
return s, err
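# Worked example (a sketch run in this module's namespace, with mp as the
# context): for j_{0,1} the series gives roughly 2.406 with an error bound
# near 0.005, bracketing the true zero 2.404825...
from mpmath import mp
r, err = mcmahon(mp, 1, 0, mp.mpf(0), 1)
assert abs(r - mp.mpf('2.404825557695773')) < err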
def generalized_bisection(ctx,f,a,b,n):
"""
Given f known to have exactly n simple roots within [a,b],
return a list of n intervals isolating the roots
and having opposite signs at the endpoints.
TODO: this can be optimized, e.g. by reusing evaluation points.
"""
assert n >= 1
N = n+1
points = []
signs = []
while 1:
points = ctx.linspace(a,b,N)
signs = [ctx.sign(f(x)) for x in points]
ok_intervals = [(points[i],points[i+1]) for i in range(N-1) \
if signs[i]*signs[i+1] == -1]
if len(ok_intervals) == n:
return ok_intervals
N = N*2
def find_in_interval(ctx, f, ab):
return ctx.findroot(f, ab, solver='illinois', verify=False)
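# Usage sketch for the two helpers above (run in this module's namespace,
# with mp as the context): sin has exactly three roots in [1, 10], namely
# pi, 2*pi and 3*pi; isolate them, then refine the first bracket.
from mpmath import mp
ivals = generalized_bisection(mp, mp.sin, 1, 10, 3)
root = find_in_interval(mp, mp.sin, ivals[0])    # approximately pi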
def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}):
prec = ctx.prec
workprec = max(prec, ctx.mag(v), ctx.mag(m))+10
try:
ctx.prec = workprec
v = ctx.mpf(v)
m = int(m)
prime = int(prime)
assert v >= 0
assert m >= 1
assert prime in (0,1)
if kind == 1:
if prime: f = lambda x: ctx.besselj(v,x,derivative=1)
else: f = lambda x: ctx.besselj(v,x)
if kind == 2:
if prime: f = lambda x: ctx.bessely(v,x,derivative=1)
else: f = lambda x: ctx.bessely(v,x)
# The first root of J' is very close to 0 for small
# orders, and this needs to be special-cased
if kind == 1 and prime and m == 1:
if v == 0:
return ctx.zero
if v <= 1:
# TODO: use v <= j'_{v,1} < y_{v,1}?
r = 2*ctx.sqrt(v*(1+v)/(v+2))
return find_in_interval(ctx, f, (r/10, 2*r))
if (kind,prime,v,m) in _interval_cache:
return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m])
r, err = mcmahon(ctx, kind, prime, v, m)
if err < isoltol:
return find_in_interval(ctx, f, (r-isoltol, r+isoltol))
# An x such that 0 < x < r_{v,1}
if kind == 1 and not prime: low = 2.4
if kind == 1 and prime: low = 1.8
if kind == 2 and not prime: low = 0.8
if kind == 2 and prime: low = 2.0
n = m+1
while 1:
r1, err = mcmahon(ctx, kind, prime, v, n)
if err < isoltol:
r2, err2 = mcmahon(ctx, kind, prime, v, n+1)
intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n)
for k, ab in enumerate(intervals):
_interval_cache[kind,prime,v,k+1] = ab
return find_in_interval(ctx, f, intervals[m-1])
else:
n = n*2
finally:
ctx.prec = prec
@defun
def besseljzero(ctx, v, m, derivative=0):
r"""
For a real order `\nu \ge 0` and a positive integer `m`, returns
`j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively,
    with *derivative=1*, gives the `m`-th nonnegative simple zero
`j'_{\nu,m}` of `J'_{\nu}(z)`.
The indexing convention is that used by Abramowitz & Stegun
and the DLMF. Note the special case `j'_{0,1} = 0`, while all other
zeros are positive. In effect, only simple zeros are counted
(all zeros of Bessel functions are simple except possibly `z = 0`)
    and `j'_{\nu,m}` becomes a monotonic function of both `\nu`
and `m`.
The zeros are interlaced according to the inequalities
.. math ::
j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1}
        j_{\nu,1} < j_{\nu+1,1} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots
**Examples**
Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3)
2.404825557695772768621632
5.520078110286310649596604
8.653727912911012216954199
>>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3)
3.831705970207512315614436
7.01558666981561875353705
10.17346813506272207718571
>>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3)
5.135622301840682556301402
8.417244140399864857783614
11.61984117214905942709415
Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`::
        >>> besseljzero(0,1,1); besseljzero(0,2,1); besseljzero(0,3,1)
        0.0
3.831705970207512315614436
7.01558666981561875353705
>>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1)
1.84118378134065930264363
5.331442773525032636884016
8.536316366346285834358961
>>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1)
3.054236928227140322755932
6.706133194158459146634394
9.969467823087595793179143
Zeros with large index::
>>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000)
313.3742660775278447196902
3140.807295225078628895545
31415.14114171350798533666
>>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000)
321.1893195676003157339222
3148.657306813047523500494
31422.9947255486291798943
>>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1)
311.8018681873704508125112
3139.236339643802482833973
31413.57032947022399485808
Zeros of functions with large order::
>>> besseljzero(50,1)
57.11689916011917411936228
>>> besseljzero(50,2)
62.80769876483536093435393
>>> besseljzero(50,100)
388.6936600656058834640981
>>> besseljzero(50,1,1)
52.99764038731665010944037
>>> besseljzero(50,2,1)
60.02631933279942589882363
>>> besseljzero(50,100,1)
387.1083151608726181086283
Zeros of functions with fractional order::
>>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4)
3.141592653589793238462643
4.493409457909064175307881
15.15657692957458622921634
Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite
products over their zeros::
>>> v,z = 2, mpf(1)
>>> (z/2)**v/gamma(v+1) * \
... nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf])
...
0.1149034849319004804696469
>>> besselj(v,z)
0.1149034849319004804696469
>>> (z/2)**(v-1)/2/gamma(v) * \
... nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf])
...
0.2102436158811325550203884
>>> besselj(v,z,1)
0.2102436158811325550203884
"""
return +bessel_zero(ctx, 1, derivative, v, m)
@defun
def besselyzero(ctx, v, m, derivative=0):
r"""
For a real order `\nu \ge 0` and a positive integer `m`, returns
`y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively,
    with *derivative=1*, gives the `m`-th positive zero `y'_{\nu,m}` of
`Y'_{\nu}(z)`.
The zeros are interlaced according to the inequalities
.. math ::
y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1}
        y_{\nu,1} < y_{\nu+1,1} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots
**Examples**
Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3)
0.8935769662791675215848871
3.957678419314857868375677
7.086051060301772697623625
>>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3)
2.197141326031017035149034
5.429681040794135132772005
8.596005868331168926429606
>>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3)
3.384241767149593472701426
6.793807513268267538291167
10.02347797936003797850539
Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`::
>>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1)
2.197141326031017035149034
5.429681040794135132772005
8.596005868331168926429606
>>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1)
3.683022856585177699898967
6.941499953654175655751944
10.12340465543661307978775
>>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1)
5.002582931446063945200176
8.350724701413079526349714
11.57419546521764654624265
Zeros with large index::
>>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000)
311.8034717601871549333419
3139.236498918198006794026
31413.57034538691205229188
>>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000)
319.6183338562782156235062
3147.086508524556404473186
31421.42392920214673402828
>>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1)
313.3726705426359345050449
3140.807136030340213610065
31415.14112579761578220175
Zeros of functions with large order::
>>> besselyzero(50,1)
53.50285882040036394680237
>>> besselyzero(50,2)
60.11244442774058114686022
>>> besselyzero(50,100)
387.1096509824943957706835
>>> besselyzero(50,1,1)
56.96290427516751320063605
>>> besselyzero(50,2,1)
62.74888166945933944036623
>>> besselyzero(50,100,1)
388.6923300548309258355475
Zeros of functions with fractional order::
>>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4)
1.570796326794896619231322
2.798386045783887136720249
13.56721208770735123376018
"""
return +bessel_zero(ctx, 2, derivative, v, m)
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/bessel.py |
from .functions import defun, defun_wrapped
@defun_wrapped
def _erf_complex(ctx, z):
z2 = ctx.square_exp_arg(z, -1)
#z2 = -z**2
v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2)
if not ctx._re(z):
v = ctx._im(v)*ctx.j
return v
@defun_wrapped
def _erfc_complex(ctx, z):
if ctx.re(z) > 2:
z2 = ctx.square_exp_arg(z)
nz2 = ctx.fneg(z2, exact=True)
v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2)
else:
v = 1 - ctx._erf_complex(z)
if not ctx._re(z):
v = 1+ctx._im(v)*ctx.j
return v
@defun
def erf(ctx, z):
z = ctx.convert(z)
if ctx._is_real_type(z):
try:
return ctx._erf(z)
except NotImplementedError:
pass
if ctx._is_complex_type(z) and not z.imag:
try:
return type(z)(ctx._erf(z.real))
except NotImplementedError:
pass
return ctx._erf_complex(z)
@defun
def erfc(ctx, z):
z = ctx.convert(z)
if ctx._is_real_type(z):
try:
return ctx._erfc(z)
except NotImplementedError:
pass
if ctx._is_complex_type(z) and not z.imag:
try:
return type(z)(ctx._erfc(z.real))
except NotImplementedError:
pass
return ctx._erfc_complex(z)
@defun
def square_exp_arg(ctx, z, mult=1, reciprocal=False):
prec = ctx.prec*4+20
if reciprocal:
z2 = ctx.fmul(z, z, prec=prec)
z2 = ctx.fdiv(ctx.one, z2, prec=prec)
else:
z2 = ctx.fmul(z, z, prec=prec)
if mult != 1:
z2 = ctx.fmul(z2, mult, exact=True)
return z2
@defun_wrapped
def erfi(ctx, z):
if not z:
return z
z2 = ctx.square_exp_arg(z)
v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2)
if not ctx._re(z):
v = ctx._im(v)*ctx.j
return v
@defun_wrapped
def erfinv(ctx, x):
xre = ctx._re(x)
if (xre != x) or (xre < -1) or (xre > 1):
return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1")
x = xre
#if ctx.isnan(x): return x
if not x: return x
if x == 1: return ctx.inf
if x == -1: return ctx.ninf
if abs(x) < 0.9:
a = 0.53728*x**3 + 0.813198*x
else:
# An asymptotic formula
u = ctx.ln(2/ctx.pi/(abs(x)-1)**2)
a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2)
ctx.prec += 10
return ctx.findroot(lambda t: ctx.erf(t)-x, a)
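# Round-trip sketch (assumes mpmath's usual top-level exports): findroot
# polishes the crude seed to working precision, so erf(erfinv(x)) returns x.
from mpmath import mp, erf, erfinv, almosteq
mp.dps = 25
assert almosteq(erf(erfinv(mp.mpf('0.3'))), mp.mpf('0.3'))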
@defun_wrapped
def npdf(ctx, x, mu=0, sigma=1):
sigma = ctx.convert(sigma)
return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi))
@defun_wrapped
def ncdf(ctx, x, mu=0, sigma=1):
a = (x-mu)/(sigma*ctx.sqrt(2))
if a < 0:
return ctx.erfc(-a)/2
else:
return (1+ctx.erf(a))/2
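# Consistency sketch (assumes mpmath's usual top-level exports): the density
# npdf is the derivative of the cumulative ncdf.
from mpmath import mp, diff, ncdf, npdf
d = diff(ncdf, mp.mpf('1.3'))    # ~0.171368
p = npdf(mp.mpf('1.3'))          # same value, since npdf = d/dx ncdf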
@defun_wrapped
def betainc(ctx, a, b, x1=0, x2=1, regularized=False):
if x1 == x2:
v = 0
elif not x1:
if x1 == 0 and x2 == 1:
v = ctx.beta(a, b)
else:
v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a
else:
m, d = ctx.nint_distance(a)
if m <= 0:
if d < -ctx.prec:
h = +ctx.eps
ctx.prec *= 2
a += h
elif d < -4:
ctx.prec -= d
s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2)
s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1)
v = (s1 - s2) / a
if regularized:
v /= ctx.beta(a,b)
return v
@defun
def gammainc(ctx, z, a=0, b=None, regularized=False):
regularized = bool(regularized)
z = ctx.convert(z)
if a is None:
a = ctx.zero
lower_modified = False
else:
a = ctx.convert(a)
lower_modified = a != ctx.zero
if b is None:
b = ctx.inf
upper_modified = False
else:
b = ctx.convert(b)
upper_modified = b != ctx.inf
# Complete gamma function
if not (upper_modified or lower_modified):
if regularized:
if ctx.re(z) < 0:
return ctx.inf
elif ctx.re(z) > 0:
return ctx.one
else:
return ctx.nan
return ctx.gamma(z)
if a == b:
return ctx.zero
# Standardize
if ctx.re(a) > ctx.re(b):
return -ctx.gammainc(z, b, a, regularized)
# Generalized gamma
if upper_modified and lower_modified:
return +ctx._gamma3(z, a, b, regularized)
# Upper gamma
elif lower_modified:
return ctx._upper_gamma(z, a, regularized)
# Lower gamma
elif upper_modified:
return ctx._lower_gamma(z, b, regularized)
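# Additivity sketch for the standardization above (assumes mpmath's usual
# top-level exports): the lower and upper pieces recombine into the complete
# gamma function.
from mpmath import mp, gammainc, gamma, almosteq
z = mp.mpf('2.5')
assert almosteq(gammainc(z, 0, 3) + gammainc(z, 3), gamma(z))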
@defun
def _lower_gamma(ctx, z, b, regularized=False):
# Pole
if ctx.isnpint(z):
return type(z)(ctx.inf)
G = [z] * regularized
negb = ctx.fneg(b, exact=True)
def h(z):
T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
return (T1,)
return ctx.hypercomb(h, [z])
@defun
def _upper_gamma(ctx, z, a, regularized=False):
# Fast integer case, when available
if ctx.isint(z):
try:
if regularized:
# Gamma pole
if ctx.isnpint(z):
return type(z)(ctx.zero)
orig = ctx.prec
try:
ctx.prec += 10
return ctx._gamma_upper_int(z, a) / ctx.gamma(z)
finally:
ctx.prec = orig
else:
return ctx._gamma_upper_int(z, a)
except NotImplementedError:
pass
nega = ctx.fneg(a, exact=True)
G = [z] * regularized
# Use 2F0 series when possible; fall back to lower gamma representation
try:
def h(z):
r = z-1
return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
return ctx.hypercomb(h, [z], force_series=True)
except ctx.NoConvergence:
def h(z):
T1 = [], [1, z-1], [z], G, [], [], 0
T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
return T1, T2
return ctx.hypercomb(h, [z])
@defun
def _gamma3(ctx, z, a, b, regularized=False):
pole = ctx.isnpint(z)
if regularized and pole:
return ctx.zero
try:
ctx.prec += 15
# We don't know in advance whether it's better to write as a difference
# of lower or upper gamma functions, so try both
T1 = ctx.gammainc(z, a, regularized=regularized)
T2 = ctx.gammainc(z, b, regularized=regularized)
R = T1 - T2
if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
return R
if not pole:
T1 = ctx.gammainc(z, 0, b, regularized=regularized)
T2 = ctx.gammainc(z, 0, a, regularized=regularized)
R = T1 - T2
# May be ok, but should probably at least print a warning
# about possible cancellation
if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
return R
finally:
ctx.prec -= 15
raise NotImplementedError
@defun_wrapped
def expint(ctx, n, z):
if ctx.isint(n) and ctx._is_real_type(z):
try:
return ctx._expint_int(n, z)
except NotImplementedError:
pass
if ctx.isnan(n) or ctx.isnan(z):
return z*n
if z == ctx.inf:
return 1/z
if z == 0:
# integral from 1 to infinity of t^n
if ctx.re(n) <= 1:
# TODO: reasonable sign of infinity
return type(z)(ctx.inf)
else:
return ctx.one/(n-1)
if n == 0:
return ctx.exp(-z)/z
if n == -1:
return ctx.exp(-z)*(z+1)/z**2
return z**(n-1) * ctx.gammainc(1-n, z)
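# Identity sketch (assumes mpmath's usual top-level exports): the final line
# above specializes at n = 1 to E_1(z) = Gamma(0, z), the upper incomplete
# gamma function.
from mpmath import mp, expint, gammainc, almosteq
z = mp.mpf('2.5')
assert almosteq(expint(1, z), gammainc(0, z))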
@defun_wrapped
def li(ctx, z, offset=False):
if offset:
if z == 2:
return ctx.zero
return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2)
if not z:
return z
if z == 1:
return ctx.ninf
return ctx.ei(ctx.ln(z))
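# Offset sketch (assumes mpmath's usual top-level exports): the offset
# (Eulerian) logarithmic integral is just a shift, Li(x) = li(x) - li(2).
from mpmath import mp, li, almosteq
assert almosteq(li(10, offset=True), li(10) - li(2))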
@defun
def ei(ctx, z):
try:
return ctx._ei(z)
except NotImplementedError:
return ctx._ei_generic(z)
@defun_wrapped
def _ei_generic(ctx, z):
# Note: the following is currently untested because mp and fp
# both use special-case ei code
if z == ctx.inf:
return z
if z == ctx.ninf:
return ctx.zero
if ctx.mag(z) > 1:
try:
r = ctx.one/z
v = ctx.exp(z)*ctx.hyper([1,1],[],r,
maxterms=ctx.prec, force_series=True)/z
im = ctx._im(z)
if im > 0:
v += ctx.pi*ctx.j
if im < 0:
v -= ctx.pi*ctx.j
return v
except ctx.NoConvergence:
pass
v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler
if ctx._im(z):
v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z))
else:
v += ctx.log(abs(z))
return v
@defun
def e1(ctx, z):
try:
return ctx._e1(z)
except NotImplementedError:
return ctx.expint(1, z)
@defun
def ci(ctx, z):
try:
return ctx._ci(z)
except NotImplementedError:
return ctx._ci_generic(z)
@defun_wrapped
def _ci_generic(ctx, z):
if ctx.isinf(z):
if z == ctx.inf: return ctx.zero
if z == ctx.ninf: return ctx.pi*1j
jz = ctx.fmul(ctx.j,z,exact=True)
njz = ctx.fneg(jz,exact=True)
v = 0.5*(ctx.ei(jz) + ctx.ei(njz))
zreal = ctx._re(z)
zimag = ctx._im(z)
if zreal == 0:
if zimag > 0: v += ctx.pi*0.5j
if zimag < 0: v -= ctx.pi*0.5j
if zreal < 0:
if zimag >= 0: v += ctx.pi*1j
if zimag < 0: v -= ctx.pi*1j
if ctx._is_real_type(z) and zreal > 0:
v = ctx._re(v)
return v
@defun
def si(ctx, z):
try:
return ctx._si(z)
except NotImplementedError:
return ctx._si_generic(z)
@defun_wrapped
def _si_generic(ctx, z):
if ctx.isinf(z):
if z == ctx.inf: return 0.5*ctx.pi
if z == ctx.ninf: return -0.5*ctx.pi
# Suffers from cancellation near 0
if ctx.mag(z) >= -1:
jz = ctx.fmul(ctx.j,z,exact=True)
njz = ctx.fneg(jz,exact=True)
v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz))
zreal = ctx._re(z)
if zreal > 0:
v -= 0.5*ctx.pi
if zreal < 0:
v += 0.5*ctx.pi
if ctx._is_real_type(z):
v = ctx._re(v)
return v
else:
return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z)
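# Hedged sketch (hypothetical helper, never called): both branches of
# _si_generic above -- the Ei-based formula and the small-argument
# hypergeometric series -- should agree with the defining integral
# Si(x) = int_0^x sin(t)/t dt; sinc avoids the removable singularity at 0.
def _si_sketch():
    from mpmath import mp, si, sinc, quad
    mp.dps = 20
    assert abs(si(5) - quad(sinc, [0, 5])) < 1e-15      # Ei-based branch
    x = mp.mpf('0.1')                                   # mag(x) < -1
    assert abs(si(x) - quad(sinc, [0, x])) < 1e-15      # hyp1f2 series branch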
@defun_wrapped
def chi(ctx, z):
nz = ctx.fneg(z, exact=True)
v = 0.5*(ctx.ei(z) + ctx.ei(nz))
zreal = ctx._re(z)
zimag = ctx._im(z)
if zimag > 0:
v += ctx.pi*0.5j
elif zimag < 0:
v -= ctx.pi*0.5j
elif zreal < 0:
v += ctx.pi*1j
return v
@defun_wrapped
def shi(ctx, z):
# Suffers from cancellation near 0
if ctx.mag(z) >= -1:
nz = ctx.fneg(z, exact=True)
v = 0.5*(ctx.ei(z) - ctx.ei(nz))
zimag = ctx._im(z)
if zimag > 0: v -= 0.5j*ctx.pi
if zimag < 0: v += 0.5j*ctx.pi
return v
else:
return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)
@defun_wrapped
def fresnels(ctx, z):
if z == ctx.inf:
return ctx.mpf(0.5)
if z == ctx.ninf:
return ctx.mpf(-0.5)
return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16)
@defun_wrapped
def fresnelc(ctx, z):
if z == ctx.inf:
return ctx.mpf(0.5)
if z == ctx.ninf:
return ctx.mpf(-0.5)
return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16)
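# Hedged sketch for the Fresnel integrals above (hypothetical helper, never
# called): S(z) = int_0^z sin(pi*t**2/2) dt, C(z) = int_0^z cos(pi*t**2/2) dt,
# with both tending to 1/2 as z -> +inf.
def _fresnel_sketch():
    from mpmath import mp, fresnels, fresnelc, quad, sin, cos, pi
    mp.dps = 20
    assert abs(fresnels(1) - quad(lambda t: sin(pi*t**2/2), [0, 1])) < 1e-15
    assert abs(fresnelc(1) - quad(lambda t: cos(pi*t**2/2), [0, 1])) < 1e-15
    assert fresnels(mp.inf) == 0.5 and fresnelc(mp.inf) == 0.5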
#------------------- end of mpmath/functions/expintegrals.py ------------------#
import math
class RSCache:
def __init__(ctx):
ctx._rs_cache = [0, 10, {}, {}]
from .functions import defun
#-------------------------------------------------------------------------------#
# #
# coef(ctx, J, eps, _cache=[0, 10, {} ] ) #
# #
#-------------------------------------------------------------------------------#
# This function computes the coefficients c[n] defined in (I, equation (47)),
# but see also (II, Section 3.14).
#
# Since these coefficients are very difficult to compute, we save the values
# in a cache. So if we compute several values of the function Rzeta(s) for
# nearby values of s, we do not recompute these coefficients.
#
# c[n] are the Taylor coefficients of the function:
#
# F(z) := (exp(pi*j*(z*z/2+3/8)) - j*sqrt(2)*cos(pi*z/2))/(2*cos(pi*z))
#
#
def _coef(ctx, J, eps):
r"""
Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps
**Definition**
The coefficients c_n are defined by
.. math ::
\begin{equation}
F(z)=\frac{e^{\pi i
\bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi
z}=\sum_{n=0}^\infty c_{2n} z^{2n}
\end{equation}
they are computed applying the relation
.. math ::
\begin{multline}
c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n}
\sum_{k=0}^n\frac{(-1)^k}{(2k)!}
2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\
+e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{
E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}.
\end{multline}
"""
    newJ = J+2 # compute more coefficients than are needed
    neweps6 = eps/2. # compute with slightly more precision
    # than is needed
# PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)
# See II Section 3.16
#
# Computing the exponent wpvw of the error II equation (81)
wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))
    # Preparation of Euler numbers (we need them up to index 2*newJ)
E = ctx._eulernum(2*newJ)
# Now we have in the cache all the needed Euler numbers.
#
# Computing the powers of pi
#
# We need to compute the powers pi**n for 1<= n <= 2*J
# with relative error less than 2**(-wpvw)
    # it is easy to show that this is obtained by
    # taking wppi as the least d with
    # 2**d > 40*J and 2**d > 4.24*newJ + 2**wpvw.
    # In II Section 3.9 we also need
    # wppi > wptcoef[0], and the powers
    # computed here (0 <= k <= 2*newJ) must cover
    # those needed there, which go up to 2*L-2;
    # so we need J >= L. This will be checked
    # before computing tcoef[]
wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw)
ctx.prec = wppi
pipower = {}
pipower[0] = ctx.one
pipower[1] = ctx.pi
for n in range(2,2*newJ+1):
pipower[n] = pipower[n-1]*ctx.pi
# COMPUTING THE COEFFICIENTS v(n) AND w(n)
# see II equation (61) and equations (81) and (82)
ctx.prec = wpvw+2
v={}
w={}
for n in range(0,newJ+1):
va = (-1)**n * ctx._eulernum(2*n)
va = ctx.mpf(va)/ctx.fac(2*n)
v[n]=va*pipower[2*n]
for n in range(0,2*newJ+1):
wa = ctx.one/ctx.fac(n)
wa=wa/(2**n)
w[n]=wa*pipower[n]
# COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2
# See II Section 3.16
ctx.prec = 15
wpp1a = 9 - ctx.mag(neweps6)
P1 = {}
for n in range(0,newJ+1):
ctx.prec = 15
wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
ctx.prec = wpp1
sump = 0
for k in range(0,n+1):
sump += ((-1)**k) * v[k]*w[2*n-2*k]
P1[n]=((-1)**(n+1))*ctx.j*sump
P2={}
for n in range(0,newJ+1):
ctx.prec = 15
wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
ctx.prec = wpp2
sump = 0
for k in range(0,n+1):
sump += (ctx.j**(n-k)) * v[k]*w[n-k]
P2[n]=sump
# COMPUTING THE COEFFICIENTS c[2n]
# See II Section 3.14
ctx.prec = 15
wpc0 = 5 - ctx.mag(neweps6)
wpc = max(6,4*newJ+wpc0)
ctx.prec = wpc
mu = ctx.sqrt(ctx.mpf('2'))/2
nu = ctx.expjpi(3./8)/2
c={}
for n in range(0,newJ):
ctx.prec = 15
wpc = max(6,4*n+wpc0)
ctx.prec = wpc
c[2*n] = mu*P1[n]+nu*P2[n]
for n in range(1,2*newJ,2):
c[n] = 0
return [newJ, neweps6, c, pipower]
def coef(ctx, J, eps):
_cache = ctx._rs_cache
if J <= _cache[0] and eps >= _cache[1]:
return _cache[2], _cache[3]
orig = ctx._mp.prec
try:
data = _coef(ctx._mp, J, eps)
finally:
ctx._mp.prec = orig
if ctx is not ctx._mp:
data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items())
data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items())
ctx._rs_cache[:] = data
return ctx._rs_cache[2], ctx._rs_cache[3]
#-------------------------------------------------------------------------------#
# #
# Rzeta_simul(s,k=0) #
# #
#-------------------------------------------------------------------------------#
# This function returns a list with the values:
# Rzeta(sigma+it), conj(Rzeta(1-sigma+it)), Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)),
# ..., Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it))
#
# Useful to compute the function zeta(s) and Z(w) or its derivatives.
#
def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL):
# COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
# See II Section 3.11 equations (47) and (48)
aux1 = 126.0657606*xA/xeps4 # 126.06.. = 316/sqrt(2*pi)
aux1 = ctx.ln(aux1)
aux2 = (2*ctx.ln(ctx.pi)+ctx.ln(xB1)+ctx.ln(a))/3 -ctx.ln(2*ctx.pi)/2
m = 3*xL-3
aux3= (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
while((aux1 < m*aux2+ aux3)and (m>1)):
m = m - 1
aux3 = (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
xM = m
return xM
def aux_J_needed(ctx, xA, xeps4, a, xB1, xM):
# DETERMINATION OF J THE NUMBER OF TERMS NEEDED
# IN THE TAYLOR SERIES OF F.
    # See II Section 3.11 equation (49)
    # Only the bound h3 is determined here; the caller derives J from it
h1 = xeps4/(632*xA)
h2 = xB1*a * 126.31337419529260248 # = pi^2*e^2*sqrt(3)
h2 = h1 * ctx.power((h2/xM**2),(xM-1)/3) / xM
h3 = min(h1,h2)
return h3
def Rzeta_simul(ctx, s, der=0):
# First we take the value of ctx.prec
wpinitial = ctx.prec
# INITIALIZATION
# Take the real and imaginary part of s
t = ctx._im(s)
xsigma = ctx._re(s)
ysigma = 1 - xsigma
    # Now compute several parameters that appear in the program
ctx.prec = 15
a = ctx.sqrt(t/(2*ctx.pi))
xasigma = a ** xsigma
yasigma = a ** ysigma
# We need a simple bound A1 < asigma (see II Section 3.1 and 3.3)
xA1=ctx.power(2, ctx.mag(xasigma)-1)
yA1=ctx.power(2, ctx.mag(yasigma)-1)
# We compute various epsilon's (see II end of Section 3.1)
eps = ctx.power(2, -wpinitial)
eps1 = eps/6.
xeps2 = eps * xA1/3.
yeps2 = eps * yA1/3.
    # COMPUTING SOME COEFFICIENTS THAT DEPEND
# ON sigma
# constant b and c (see I Theorem 2 formula (26) )
# coefficients A and B1 (see I Section 6.1 equation (50))
#
    # here we do not need high precision
ctx.prec = 15
if xsigma > 0:
xb = 2.
xc = math.pow(9,xsigma)/4.44288
# 4.44288 =(math.sqrt(2)*math.pi)
xA = math.pow(9,xsigma)
xB1 = 1
else:
xb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
xc = math.pow(2,-xsigma)/4.44288
xA = math.pow(2,-xsigma)
xB1 = 1.10789 # = 2*sqrt(1-log(2))
if(ysigma > 0):
yb = 2.
yc = math.pow(9,ysigma)/4.44288
# 4.44288 =(math.sqrt(2)*math.pi)
yA = math.pow(9,ysigma)
yB1 = 1
else:
yb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
yc = math.pow(2,-ysigma)/4.44288
yA = math.pow(2,-ysigma)
yB1 = 1.10789 # = 2*sqrt(1-log(2))
# COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
# CORRECTION
# See II Section 3.2
ctx.prec = 15
xL = 1
while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2:
xL = xL+1
xL = max(2,xL)
yL = 1
while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2:
yL = yL+1
yL = max(2,yL)
    # The number L has to satisfy some conditions.
    # If it does not, RS cannot compute Rzeta(s) with the prescribed precision
    # (see II, Section 3.2 condition (20) and
    # II, Section 3.3 condition (22)). We have also added
    # an additional technical condition in Section 3.17 Proposition 17.
if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \
(3*yL >= 2*a*a/25.) or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)):
ctx.prec = wpinitial
        raise NotImplementedError("Riemann-Siegel cannot compute with such precision")
# We take the maximum of the two values
L = max(xL, yL)
# INITIALIZATION (CONTINUATION)
#
# eps3 is the constant defined on (II, Section 3.5 equation (27) )
# each term of the RS correction must be computed with error <= eps3
xeps3 = xeps2/(4*xL)
yeps3 = yeps2/(4*yL)
# eps4 is defined on (II Section 3.6 equation (30) )
# each component of the formula (II Section 3.6 equation (29) )
# must be computed with error <= eps4
xeps4 = xeps3/(3*xL)
yeps4 = yeps3/(3*yL)
# COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL)
yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL)
M = max(xM, yM)
# COMPUTING NUMBER OF TERMS J NEEDED
h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM)
h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM)
h3 = min(h3,h4)
J = 12
jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
while jvalue > h3:
J = J+1
jvalue = (2*ctx.pi)*jvalue/J
# COMPUTING eps5[m] for 1 <= m <= 21
# See II Section 10 equation (43)
# We choose the minimum of the two possibilities
eps5={}
xforeps5 = math.pi*math.pi*xB1*a
yforeps5 = math.pi*math.pi*yB1*a
for m in range(0,22):
xaux1 = math.pow(xforeps5, m/3)/(316.*xA)
yaux1 = math.pow(yforeps5, m/3)/(316.*yA)
aux1 = min(xaux1, yaux1)
aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
aux2 = math.sqrt(aux2)
eps5[m] = (aux1*aux2*min(xeps4,yeps4))
# COMPUTING wpfp
# See II Section 3.13 equation (59)
twenty = min(3*L-3, 21)+1
aux = 6812*J
wpfp = ctx.mag(44*J)
for m in range(0,twenty):
wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
# COMPUTING N AND p
# See II Section
ctx.prec = wpfp + ctx.mag(t)+20
a = ctx.sqrt(t/(2*ctx.pi))
N = ctx.floor(a)
p = 1-2*(a-N)
    # Now we round p to the precision wpfp;
    # this is possibly not necessary.
num=ctx.floor(p*(ctx.mpf('2')**wpfp))
difference = p * (ctx.mpf('2')**wpfp)-num
    if difference >= 0.5:
        num = num+1
p = ctx.convert(num * (ctx.mpf('2')**(-wpfp)))
# COMPUTING THE COEFFICIENTS c[n] = cc[n]
# We shall use the notation cc[n], since there is
# a constant that is called c
# See II Section 3.14
    # We compute the coefficients and also save them in a
# cache. The bulk of the computation is passed to
# the function coef()
#
# eps6 is defined in II Section 3.13 equation (58)
eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J)
# Now we compute the coefficients
cc = {}
cont = {}
cont, pipowers = coef(ctx, J, eps6)
    cc = cont.copy() # we need a copy since we have
    # to change its values
    Fp = {} # this is the right place for this initialization
for n in range(M, 3*L-2):
Fp[n] = 0
Fp={}
ctx.prec = wpfp
for m in range(0,M+1):
sumP = 0
for k in range(2*J-m-1,-1,-1):
sumP = (sumP * p)+ cc[k]
Fp[m] = sumP
# preparation of the new coefficients
for k in range(0,2*J-m-1):
cc[k] = (k+1)* cc[k+1]
# COMPUTING THE NUMBERS xd[u,n,k], yd[u,n,k]
# See II Section 3.17
#
# First we compute the working precisions xwpd[k]
    # See II equation (92)
xwpd={}
d1 = max(6,ctx.mag(40*L*L))
xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1
xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2
for n in range(0,L):
xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2
xwpd[n]=max(xd3,d1)
# procedure of II Section 3.17
ctx.prec = xwpd[1]+10
xpsigma = 1-(2*xsigma)
xd = {}
xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0
xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0
for n in range(1,L):
ctx.prec = xwpd[n]+10
for k in range(0,3*n//2+1):
m = 3*n-2*k
if(m!=0):
m1 = ctx.one/m
c1= m1/4
c2=(xpsigma*m1)/2
c3=-(m+1)
xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1]
else:
xd[0,n,k]=0
for r in range(0,k):
add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
xd[0,n,k] -= ((-1)**(k-r))*add
xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0
for mu in range(-2,der+1):
for n in range(-2,L):
for k in range(-3,max(1,3*n//2+2)):
if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
xd[mu,n,k] = 0
for mu in range(1,der+1):
for n in range(0,L):
ctx.prec = xwpd[n]+10
for k in range(0,3*n//2+1):
aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3]
xd[mu,n,k] = aux - xd[mu-1,n-1,k-1]
# Now we compute the working precisions ywpd[k]
    # See II equation (92)
ywpd={}
d1 = max(6,ctx.mag(40*L*L))
yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1
yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2
for n in range(0,L):
yd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2
ywpd[n]=max(yd3,d1)
# procedure of II Section 3.17
ctx.prec = ywpd[1]+10
ypsigma = 1-(2*ysigma)
yd = {}
yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0
yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0
for n in range(1,L):
ctx.prec = ywpd[n]+10
for k in range(0,3*n//2+1):
m = 3*n-2*k
if(m!=0):
m1 = ctx.one/m
c1= m1/4
c2=(ypsigma*m1)/2
c3=-(m+1)
yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1]
else:
yd[0,n,k]=0
for r in range(0,k):
add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
yd[0,n,k] -= ((-1)**(k-r))*add
yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0
for mu in range(-2,der+1):
for n in range(-2,L):
for k in range(-3,max(1,3*n//2+2)):
if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
yd[mu,n,k] = 0
for mu in range(1,der+1):
for n in range(0,L):
ctx.prec = ywpd[n]+10
for k in range(0,3*n//2+1):
aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3]
yd[mu,n,k] = aux - yd[mu-1,n-1,k-1]
# COMPUTING THE COEFFICIENTS xtcoef[k,l]
# See II Section 3.9
#
# computing the needed wp
xwptcoef={}
xwpterm={}
ctx.prec = 15
c1 = ctx.mag(40*(L+2))
xc2 = ctx.mag(68*(L+2)*xA)
xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1
for k in range(0,L):
xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2.
xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5
xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20)
ywptcoef={}
ywpterm={}
ctx.prec = 15
c1 = ctx.mag(40*(L+2))
yc2 = ctx.mag(68*(L+2)*yA)
yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1
for k in range(0,L):
yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2.
ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5
ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10
# check of power of pi
# computing the fortcoef[mu,k,ell]
xfortcoef={}
for mu in range(0,der+1):
for k in range(0,L):
for ell in range(-2,3*k//2+1):
xfortcoef[mu,k,ell]=0
for mu in range(0,der+1):
for k in range(0,L):
ctx.prec = xwptcoef[k]
for ell in range(0,3*k//2+1):
xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell)
def trunc_a(t):
wp = ctx.prec
ctx.prec = wp + 2
aa = ctx.sqrt(t/(2*ctx.pi))
ctx.prec = wp
return aa
# computing the tcoef[k,ell]
xtcoef={}
for mu in range(0,der+1):
for k in range(0,L):
for ell in range(-2,3*k//2+1):
xtcoef[mu,k,ell]=0
ctx.prec = max(xwptcoef[0],ywptcoef[0])+3
aa= trunc_a(t)
la = -ctx.ln(aa)
for chi in range(0,der+1):
for k in range(0,L):
ctx.prec = xwptcoef[k]
for ell in range(0,3*k//2+1):
xtcoef[chi,k,ell] =0
for mu in range(0, chi+1):
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell]
xtcoef[chi,k,ell] += tcoefter
# COMPUTING THE COEFFICIENTS ytcoef[k,l]
# See II Section 3.9
#
# computing the needed wp
# check of power of pi
# computing the fortcoef[mu,k,ell]
yfortcoef={}
for mu in range(0,der+1):
for k in range(0,L):
for ell in range(-2,3*k//2+1):
yfortcoef[mu,k,ell]=0
for mu in range(0,der+1):
for k in range(0,L):
ctx.prec = ywptcoef[k]
for ell in range(0,3*k//2+1):
yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell)
# computing the tcoef[k,ell]
ytcoef={}
for chi in range(0,der+1):
for k in range(0,L):
for ell in range(-2,3*k//2+1):
ytcoef[chi,k,ell]=0
for chi in range(0,der+1):
for k in range(0,L):
ctx.prec = ywptcoef[k]
for ell in range(0,3*k//2+1):
ytcoef[chi,k,ell] =0
for mu in range(0, chi+1):
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell]
ytcoef[chi,k,ell] += tcoefter
# COMPUTING tv[k,ell]
# See II Section 3.8
#
# a has a good value
ctx.prec = max(xwptcoef[0], ywptcoef[0])+2
av = {}
av[0] = 1
av[1] = av[0]/a
ctx.prec = max(xwptcoef[0],ywptcoef[0])
for k in range(2,L):
av[k] = av[k-1] * av[1]
# Computing the quotients
xtv = {}
for chi in range(0,der+1):
for k in range(0,L):
ctx.prec = xwptcoef[k]
for ell in range(0,3*k//2+1):
xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k]
# Computing the quotients
ytv = {}
for chi in range(0,der+1):
for k in range(0,L):
ctx.prec = ywptcoef[k]
for ell in range(0,3*k//2+1):
ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k]
# COMPUTING THE TERMS xterm[k]
# See II Section 3.6
xterm = {}
for chi in range(0,der+1):
for n in range(0,L):
ctx.prec = xwpterm[n]
te = 0
for k in range(0, 3*n//2+1):
te += xtv[chi,n,k]
xterm[chi,n] = te
# COMPUTING THE TERMS yterm[k]
# See II Section 3.6
yterm = {}
for chi in range(0,der+1):
for n in range(0,L):
ctx.prec = ywpterm[n]
te = 0
for k in range(0, 3*n//2+1):
te += ytv[chi,n,k]
yterm[chi,n] = te
# COMPUTING rssum
# See II Section 3.5
xrssum={}
ctx.prec=15
xrsbound = math.sqrt(ctx.pi) * xc /(xb*a)
ctx.prec=15
xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2)
xwprssum = max(xwprssum, ctx.mag(10*(L+1)))
ctx.prec = xwprssum
for chi in range(0,der+1):
xrssum[chi] = 0
for k in range(1,L+1):
xrssum[chi] += xterm[chi,L-k]
yrssum={}
ctx.prec=15
yrsbound = math.sqrt(ctx.pi) * yc /(yb*a)
ctx.prec=15
ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2)
ywprssum = max(ywprssum, ctx.mag(10*(L+1)))
ctx.prec = ywprssum
for chi in range(0,der+1):
yrssum[chi] = 0
for k in range(1,L+1):
yrssum[chi] += yterm[chi,L-k]
# COMPUTING S3
# See II Section 3.19
ctx.prec = 15
A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0]))))
eps8 = eps/(3*A2)
T = t *ctx.ln(t/(2*ctx.pi))
xwps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T)
ywps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T)
ctx.prec = max(xwps3, ywps3)
tpi = t/(2*ctx.pi)
arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
U = ctx.expj(-arg)
a = trunc_a(t)
xasigma = ctx.power(a, -xsigma)
yasigma = ctx.power(a, -ysigma)
xS3 = ((-1)**(N-1)) * xasigma * U
yS3 = ((-1)**(N-1)) * yasigma * U
# COMPUTING S1 the zetasum
# See II Section 3.18
ctx.prec = 15
xwpsum = 4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1)
ywpsum = 4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1)
wpsum = max(xwpsum, ywpsum)
ctx.prec = wpsum +10
'''
# This can be improved
xS1={}
yS1={}
for chi in range(0,der+1):
xS1[chi] = 0
yS1[chi] = 0
for n in range(1,int(N)+1):
ln = ctx.ln(n)
xexpn = ctx.exp(-ln*(xsigma+ctx.j*t))
yexpn = ctx.conj(1/(n*xexpn))
for chi in range(0,der+1):
pown = ctx.power(-ln, chi)
xterm = pown*xexpn
yterm = pown*yexpn
xS1[chi] += xterm
yS1[chi] += yterm
'''
xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True)
# END OF COMPUTATION of xrz, yrz
# See II Section 3.1
ctx.prec = 15
xabsS1 = abs(xS1[der])
xabsS2 = abs(xrssum[der] * xS3)
xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) )
ctx.prec = xwpend
xrz={}
for chi in range(0,der+1):
xrz[chi] = xS1[chi]+xrssum[chi]*xS3
ctx.prec = 15
yabsS1 = abs(yS1[der])
yabsS2 = abs(yrssum[der] * yS3)
ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) )
ctx.prec = ywpend
yrz={}
for chi in range(0,der+1):
yrz[chi] = yS1[chi]+yrssum[chi]*yS3
yrz[chi] = ctx.conj(yrz[chi])
ctx.prec = wpinitial
return xrz, yrz
def Rzeta_set(ctx, s, derivatives=[0]):
r"""
Computes several derivatives of the auxiliary function of Riemann `R(s)`.
**Definition**
The function is defined by
.. math ::
\begin{equation}
{\mathop{\mathcal R }\nolimits}(s)=
\int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}-
e^{-\pi i x}}\,dx
\end{equation}
To this function we apply the Riemann-Siegel expansion.
"""
der = max(derivatives)
# First we take the value of ctx.prec
# During the computation we will change ctx.prec, and finally we will
    # restore the initial value
wpinitial = ctx.prec
# Take the real and imaginary part of s
t = ctx._im(s)
sigma = ctx._re(s)
    # Now compute several parameters that appear in the program
ctx.prec = 15
a = ctx.sqrt(t/(2*ctx.pi)) # Careful
asigma = ctx.power(a, sigma) # Careful
# We need a simple bound A1 < asigma (see II Section 3.1 and 3.3)
A1 = ctx.power(2, ctx.mag(asigma)-1)
# We compute various epsilon's (see II end of Section 3.1)
eps = ctx.power(2, -wpinitial)
eps1 = eps/6.
eps2 = eps * A1/3.
    # COMPUTING SOME COEFFICIENTS THAT DEPEND
# ON sigma
# constant b and c (see I Theorem 2 formula (26) )
# coefficients A and B1 (see I Section 6.1 equation (50))
    # here we do not need high precision
ctx.prec = 15
if sigma > 0:
b = 2.
c = math.pow(9,sigma)/4.44288
# 4.44288 =(math.sqrt(2)*math.pi)
A = math.pow(9,sigma)
B1 = 1
else:
b = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
c = math.pow(2,-sigma)/4.44288
A = math.pow(2,-sigma)
B1 = 1.10789 # = 2*sqrt(1-log(2))
# COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
# CORRECTION
# See II Section 3.2
ctx.prec = 15
L = 1
while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2:
L = L+1
L = max(2,L)
    # The number L has to satisfy some conditions.
    # If it does not, RS cannot compute Rzeta(s) with the prescribed precision
    # (see II, Section 3.2 condition (20) and
    # II, Section 3.3 condition (22)). We have also added
    # an additional technical condition in Section 3.17 Proposition 17.
if ((3*L >= 2*a*a/25.) or (3*L+2+sigma<0) or (abs(sigma)> a/2.)):
#print 'Error Riemann-Siegel can not compute with such precision'
ctx.prec = wpinitial
        raise NotImplementedError("Riemann-Siegel cannot compute with such precision")
# INITIALIZATION (CONTINUATION)
#
# eps3 is the constant defined on (II, Section 3.5 equation (27) )
# each term of the RS correction must be computed with error <= eps3
eps3 = eps2/(4*L)
# eps4 is defined on (II Section 3.6 equation (30) )
# each component of the formula (II Section 3.6 equation (29) )
# must be computed with error <= eps4
eps4 = eps3/(3*L)
# COMPUTING M. NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
M = aux_M_Fp(ctx, A, eps4, a, B1, L)
Fp = {}
for n in range(M, 3*L-2):
Fp[n] = 0
# But I have not seen an instance of M != 3*L-3
#
# DETERMINATION OF J THE NUMBER OF TERMS NEEDED
# IN THE TAYLOR SERIES OF F.
    # See II Section 3.11 equation (49)
h1 = eps4/(632*A)
h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e
h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M
h3 = min(h1,h2)
J=12
jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
while jvalue > h3:
J = J+1
jvalue = (2*ctx.pi)*jvalue/J
# COMPUTING eps5[m] for 1 <= m <= 21
# See II Section 10 equation (43)
eps5={}
foreps5 = math.pi*math.pi*B1*a
for m in range(0,22):
aux1 = math.pow(foreps5, m/3)/(316.*A)
aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
aux2 = math.sqrt(aux2)
eps5[m] = aux1*aux2*eps4
# COMPUTING wpfp
# See II Section 3.13 equation (59)
twenty = min(3*L-3, 21)+1
aux = 6812*J
wpfp = ctx.mag(44*J)
for m in range(0, twenty):
wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
# COMPUTING N AND p
# See II Section
ctx.prec = wpfp + ctx.mag(t) + 20
a = ctx.sqrt(t/(2*ctx.pi))
N = ctx.floor(a)
p = 1-2*(a-N)
    # Now we round p to the precision wpfp; this is possibly not necessary.
num = ctx.floor(p*(ctx.mpf(2)**wpfp))
difference = p * (ctx.mpf(2)**wpfp)-num
    if difference >= 0.5:
        num = num+1
p = ctx.convert(num * (ctx.mpf(2)**(-wpfp)))
# COMPUTING THE COEFFICIENTS c[n] = cc[n]
# We shall use the notation cc[n], since there is
# a constant that is called c
# See II Section 3.14
    # We compute the coefficients and also save them in a
# cache. The bulk of the computation is passed to
# the function coef()
#
# eps6 is defined in II Section 3.13 equation (58)
eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J)
# Now we compute the coefficients
cc={}
cont={}
cont, pipowers = coef(ctx, J, eps6)
    cc = cont.copy() # we need a copy since we have to change its values
Fp={}
for n in range(M, 3*L-2):
Fp[n] = 0
ctx.prec = wpfp
for m in range(0,M+1):
sumP = 0
for k in range(2*J-m-1,-1,-1):
sumP = (sumP * p) + cc[k]
Fp[m] = sumP
# preparation of the new coefficients
for k in range(0, 2*J-m-1):
cc[k] = (k+1) * cc[k+1]
# COMPUTING THE NUMBERS d[n,k]
# See II Section 3.17
# First we compute the working precisions wpd[k]
    # See II equation (92)
wpd = {}
d1 = max(6, ctx.mag(40*L*L))
d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1
const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2
for n in range(0,L):
d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2
wpd[n] = max(d3,d1)
# procedure of II Section 3.17
ctx.prec = wpd[1]+10
psigma = 1-(2*sigma)
d = {}
d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0
d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0
for n in range(1,L):
ctx.prec = wpd[n]+10
for k in range(0,3*n//2+1):
m = 3*n-2*k
if (m!=0):
m1 = ctx.one/m
c1 = m1/4
c2 = (psigma*m1)/2
c3 = -(m+1)
d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1]
else:
d[0,n,k]=0
for r in range(0,k):
add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r))
d[0,n,k] -= ((-1)**(k-r))*add
d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0
for mu in range(-2,der+1):
for n in range(-2,L):
for k in range(-3,max(1,3*n//2+2)):
if ((mu<0)or (n<0) or(k<0)or (k>3*n//2)):
d[mu,n,k] = 0
for mu in range(1,der+1):
for n in range(0,L):
ctx.prec = wpd[n]+10
for k in range(0,3*n//2+1):
aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3]
d[mu,n,k] = aux - d[mu-1,n-1,k-1]
# COMPUTING THE COEFFICIENTS t[k,l]
# See II Section 3.9
#
# computing the needed wp
wptcoef = {}
wpterm = {}
ctx.prec = 15
c1 = ctx.mag(40*(L+2))
c2 = ctx.mag(68*(L+2)*A)
c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1
for k in range(0,L):
c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2.
wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10
wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10
# check of power of pi
# computing the fortcoef[mu,k,ell]
fortcoef={}
for mu in derivatives:
for k in range(0,L):
for ell in range(-2,3*k//2+1):
fortcoef[mu,k,ell]=0
for mu in derivatives:
for k in range(0,L):
ctx.prec = wptcoef[k]
for ell in range(0,3*k//2+1):
fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell)
def trunc_a(t):
wp = ctx.prec
ctx.prec = wp + 2
aa = ctx.sqrt(t/(2*ctx.pi))
ctx.prec = wp
return aa
# computing the tcoef[chi,k,ell]
tcoef={}
for chi in derivatives:
for k in range(0,L):
for ell in range(-2,3*k//2+1):
tcoef[chi,k,ell]=0
ctx.prec = wptcoef[0]+3
aa = trunc_a(t)
la = -ctx.ln(aa)
for chi in derivatives:
for k in range(0,L):
ctx.prec = wptcoef[k]
for ell in range(0,3*k//2+1):
tcoef[chi,k,ell] = 0
for mu in range(0, chi+1):
tcoefter = ctx.binomial(chi,mu) * la**mu * \
fortcoef[chi-mu,k,ell]
tcoef[chi,k,ell] += tcoefter
# COMPUTING tv[k,ell]
# See II Section 3.8
# Computing the powers av[k] = a**(-k)
ctx.prec = wptcoef[0] + 2
    # a already has a good value
# See II Section 3.6
av = {}
av[0] = 1
av[1] = av[0]/a
ctx.prec = wptcoef[0]
for k in range(2,L):
av[k] = av[k-1] * av[1]
# Computing the quotients
tv = {}
for chi in derivatives:
for k in range(0,L):
ctx.prec = wptcoef[k]
for ell in range(0,3*k//2+1):
tv[chi,k,ell] = tcoef[chi,k,ell]* av[k]
# COMPUTING THE TERMS term[k]
# See II Section 3.6
term = {}
for chi in derivatives:
for n in range(0,L):
ctx.prec = wpterm[n]
te = 0
for k in range(0, 3*n//2+1):
te += tv[chi,n,k]
term[chi,n] = te
# COMPUTING rssum
# See II Section 3.5
rssum={}
ctx.prec=15
rsbound = math.sqrt(ctx.pi) * c /(b*a)
ctx.prec=15
wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2)
wprssum = max(wprssum, ctx.mag(10*(L+1)))
ctx.prec = wprssum
for chi in derivatives:
rssum[chi] = 0
for k in range(1,L+1):
rssum[chi] += term[chi,L-k]
# COMPUTING S3
# See II Section 3.19
ctx.prec = 15
A2 = 2**(ctx.mag(rssum[0]))
eps8 = eps/(3* A2)
T = t * ctx.ln(t/(2*ctx.pi))
wps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T)
ctx.prec = wps3
tpi = t/(2*ctx.pi)
arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
U = ctx.expj(-arg)
a = trunc_a(t)
asigma = ctx.power(a, -sigma)
S3 = ((-1)**(N-1)) * asigma * U
# COMPUTING S1 the zetasum
# See II Section 3.18
ctx.prec = 15
wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1)
ctx.prec = wpsum + 10
'''
# This can be improved
S1 = {}
for chi in derivatives:
S1[chi] = 0
for n in range(1,int(N)+1):
ln = ctx.ln(n)
expn = ctx.exp(-ln*(sigma+ctx.j*t))
for chi in derivatives:
term = ctx.power(-ln, chi)*expn
S1[chi] += term
'''
S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0]
# END OF COMPUTATION
# See II Section 3.1
ctx.prec = 15
absS1 = abs(S1[der])
absS2 = abs(rssum[der] * S3)
wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2)))
ctx.prec = wpend
rz = {}
for chi in derivatives:
rz[chi] = S1[chi]+rssum[chi]*S3
ctx.prec = wpinitial
return rz
def z_half(ctx,t,der=0):
r"""
z_half(t,der=0) Computes Z^(der)(t)
"""
s=ctx.mpf('0.5')+ctx.j*t
wpinitial = ctx.prec
ctx.prec = 15
tt = t/(2*ctx.pi)
wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt))
wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt))
ctx.prec = wptheta
theta = ctx.siegeltheta(t)
ctx.prec = wpz
rz = Rzeta_set(ctx,s, range(der+1))
if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2)
if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4)
if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8)
if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16)
exptheta = ctx.expj(theta)
if der == 0:
z = 2*exptheta*rz[0]
if der == 1:
zf = 2j*exptheta
z = zf*(ps1*rz[0]+rz[1])
if der == 2:
zf = 2 * exptheta
z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2)
if der == 3:
zf = -2j*exptheta
z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2]
z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3)
if der == 4:
zf = 2*exptheta
z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2]
z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2
z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4
z = zf*z
ctx.prec = wpinitial
return ctx._re(z)
def zeta_half(ctx, s, k=0):
"""
zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5
"""
wpinitial = ctx.prec
sigma = ctx._re(s)
t = ctx._im(s)
#--- compute wptheta, wpR, wpbasic ---
ctx.prec = 53
# X see II Section 3.21 (109) and (110)
if sigma > 0:
X = ctx.sqrt(abs(s))
else:
X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma)
# M1 see II Section 3.21 (111) and (112)
if sigma > 0:
M1 = 2*ctx.sqrt(t/(2*ctx.pi))
else:
M1 = 4 * t * X
# T see II Section 3.21 (113)
abst = abs(0.5-s)
T = 2* abst*math.log(abst)
# computing wpbasic, wptheta, wpR see II Section 3.21
wpbasic = max(6,3+ctx.mag(t))
wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1
wpbasic = max(wpbasic, wpbasic2)
wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1)
wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
ctx.prec = wptheta
theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2
if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4
if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8
if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16
ctx.prec = wpR
xrz = Rzeta_set(ctx,s,range(k+1))
yrz={}
for chi in range(0,k+1):
yrz[chi] = ctx.conj(xrz[chi])
ctx.prec = wpbasic
exptheta = ctx.expj(-2*theta)
if k==0:
zv = xrz[0]+exptheta*yrz[0]
if k==1:
zv1 = -yrz[1] - 2*yrz[0]*ps1
zv = xrz[1] + exptheta*zv1
if k==2:
zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2
zv = xrz[2]+exptheta*zv1
if k==3:
zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
zv = xrz[3]+exptheta*zv1
if k == 4:
zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
zv = xrz[4]+exptheta*zv1
ctx.prec = wpinitial
return zv
def zeta_offline(ctx, s, k=0):
"""
Computes zeta^(k)(s) off the line
"""
wpinitial = ctx.prec
sigma = ctx._re(s)
t = ctx._im(s)
#--- compute wptheta, wpR, wpbasic ---
ctx.prec = 53
# X see II Section 3.21 (109) and (110)
if sigma > 0:
X = ctx.power(abs(s), 0.5)
else:
X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma)
# M1 see II Section 3.21 (111) and (112)
if (sigma > 0):
M1 = 2*ctx.sqrt(t/(2*ctx.pi))
else:
M1 = 4 * t * X
# M2 see II Section 3.21 (111) and (112)
if (1-sigma > 0):
M2 = 2*ctx.sqrt(t/(2*ctx.pi))
else:
M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5)
# T see II Section 3.21 (113)
abst = abs(0.5-s)
T = 2* abst*math.log(abst)
# computing wpbasic, wptheta, wpR see II Section 3.21
wpbasic = max(6,3+ctx.mag(t))
wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1
wpbasic = max(wpbasic, wpbasic2)
wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1)
wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
ctx.prec = wptheta
theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
s1 = s
s2 = ctx.conj(1-s1)
ctx.prec = wpR
xrz, yrz = Rzeta_simul(ctx, s, k)
if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2
if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8
if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16
if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32
ctx.prec = wpbasic
exptheta = ctx.expj(-2*theta)
if k == 0:
zv = xrz[0]+exptheta*yrz[0]
if k == 1:
zv1 = -yrz[1]-2*yrz[0]*ps1
zv = xrz[1]+exptheta*zv1
if k == 2:
zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2
zv = xrz[2]+exptheta*zv1
if k == 3:
zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
zv = xrz[3]+exptheta*zv1
if k == 4:
zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
zv = xrz[4]+exptheta*zv1
ctx.prec = wpinitial
return zv
def z_offline(ctx, w, k=0):
r"""
Computes Z(w) and its derivatives off the line
"""
s = ctx.mpf('0.5')+ctx.j*w
s1 = s
s2 = ctx.conj(1-s1)
wpinitial = ctx.prec
ctx.prec = 35
# X see II Section 3.21 (109) and (110)
# M1 see II Section 3.21 (111) and (112)
if (ctx._re(s1) >= 0):
M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi))
X = ctx.sqrt(abs(s1))
else:
X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1))
M1 = 4 * ctx._im(s1)*X
# M2 see II Section 3.21 (111) and (112)
if (ctx._re(s2) >= 0):
M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi))
else:
M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2))
# T see II Section 3.21 Prop. 27
T = 2*abs(ctx.siegeltheta(w))
# defining some precisions
# see II Section 3.22 (115), (116), (117)
aux1 = ctx.sqrt(X)
aux2 = aux1*(M1+M2)
aux3 = 3 +wpinitial
wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3)
wptheta = max(4,ctx.mag(2.04*aux2)+aux3)
wpR = ctx.mag(4*aux1)+aux3
# now the computations
ctx.prec = wptheta
theta = ctx.siegeltheta(w)
ctx.prec = wpR
xrz, yrz = Rzeta_simul(ctx,s,k)
pta = 0.25 + 0.5j*w
ptb = 0.25 - 0.5j*w
if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2
if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb))
if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb))
if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb))
ctx.prec = wpbasic
exptheta = ctx.expj(theta)
if k == 0:
zv = exptheta*xrz[0]+yrz[0]/exptheta
j = ctx.j
if k == 1:
zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta
if k == 2:
zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2)
zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta
if k == 3:
zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2
zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta
zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2
zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta
zv = zv1+zv2
if k == 4:
zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2
zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2
zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3
zv1 = zv1+xrz[4]+j*xrz[0]*ps4
zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2
zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2
zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3
zv2 = zv2+yrz[4]-j*yrz[0]*ps4
zv = exptheta*zv1+zv2/exptheta
ctx.prec = wpinitial
return zv
@defun
def rs_zeta(ctx, s, derivative=0, **kwargs):
if derivative > 4:
raise NotImplementedError
s = ctx.convert(s)
re = ctx._re(s); im = ctx._im(s)
if im < 0:
z = ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative))
return z
critical_line = (re == 0.5)
if critical_line:
return zeta_half(ctx, s, derivative)
else:
return zeta_offline(ctx, s, derivative)
@defun
def rs_z(ctx, w, derivative=0):
w = ctx.convert(w)
re = ctx._re(w); im = ctx._im(w)
if re < 0:
        return ctx.rs_z(-w, derivative)
critical_line = (im == 0)
    if critical_line:
return z_half(ctx, w, derivative)
else:
return z_offline(ctx, w, derivative)
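# Hedged consistency sketch for the Riemann-Siegel entry points above
# (hypothetical helper, never called; assumes the standard "mp" context):
# Z(t) = exp(i*theta(t)) * zeta(1/2 + i*t) is real on the critical line,
# and rs_z should reproduce it.
def _rs_consistency_sketch():
    from mpmath import mp
    mp.dps = 15
    t = 1000
    z = mp.expj(mp.siegeltheta(t)) * mp.zeta(mp.mpc(0.5, t))
    assert abs(mp.im(z)) < 1e-10
    assert abs(mp.rs_z(t) - mp.re(z)) < 1e-10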
#---------------------- end of mpmath/functions/rszeta.py ---------------------#
from ..libmp.backend import xrange
class SpecialFunctions(object):
"""
This class implements special functions using high-level code.
Elementary and some other functions (e.g. gamma function, basecase
hypergeometric series) are assumed to be predefined by the context as
"builtins" or "low-level" functions.
"""
defined_functions = {}
# The series for the Jacobi theta functions converge for |q| < 1;
# in the current implementation they throw a ValueError for
# abs(q) > THETA_Q_LIM
THETA_Q_LIM = 1 - 10**-7
def __init__(self):
cls = self.__class__
for name in cls.defined_functions:
f, wrap = cls.defined_functions[name]
cls._wrap_specfun(name, f, wrap)
self.mpq_1 = self._mpq((1,1))
self.mpq_0 = self._mpq((0,1))
self.mpq_1_2 = self._mpq((1,2))
self.mpq_3_2 = self._mpq((3,2))
self.mpq_1_4 = self._mpq((1,4))
self.mpq_1_16 = self._mpq((1,16))
self.mpq_3_16 = self._mpq((3,16))
self.mpq_5_2 = self._mpq((5,2))
self.mpq_3_4 = self._mpq((3,4))
self.mpq_7_4 = self._mpq((7,4))
self.mpq_5_4 = self._mpq((5,4))
self.mpq_1_3 = self._mpq((1,3))
self.mpq_2_3 = self._mpq((2,3))
self.mpq_4_3 = self._mpq((4,3))
self.mpq_1_6 = self._mpq((1,6))
self.mpq_5_6 = self._mpq((5,6))
self.mpq_5_3 = self._mpq((5,3))
self._misc_const_cache = {}
self._aliases.update({
'phase' : 'arg',
'conjugate' : 'conj',
'nthroot' : 'root',
'polygamma' : 'psi',
'hurwitz' : 'zeta',
#'digamma' : 'psi0',
#'trigamma' : 'psi1',
#'tetragamma' : 'psi2',
#'pentagamma' : 'psi3',
'fibonacci' : 'fib',
'factorial' : 'fac',
})
self.zetazero_memoized = self.memoize(self.zetazero)
# Default -- do nothing
@classmethod
def _wrap_specfun(cls, name, f, wrap):
setattr(cls, name, f)
# Optional fast versions of common functions in common cases.
# If not overridden, default (generic hypergeometric series)
# implementations will be used
def _besselj(ctx, n, z): raise NotImplementedError
def _erf(ctx, z): raise NotImplementedError
def _erfc(ctx, z): raise NotImplementedError
def _gamma_upper_int(ctx, z, a): raise NotImplementedError
def _expint_int(ctx, n, z): raise NotImplementedError
def _zeta(ctx, s): raise NotImplementedError
def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError
def _ei(ctx, z): raise NotImplementedError
def _e1(ctx, z): raise NotImplementedError
def _ci(ctx, z): raise NotImplementedError
def _si(ctx, z): raise NotImplementedError
def _altzeta(ctx, s): raise NotImplementedError
def defun_wrapped(f):
SpecialFunctions.defined_functions[f.__name__] = f, True
def defun(f):
SpecialFunctions.defined_functions[f.__name__] = f, False
def defun_static(f):
setattr(SpecialFunctions, f.__name__, f)
@defun_wrapped
def cot(ctx, z): return ctx.one / ctx.tan(z)
@defun_wrapped
def sec(ctx, z): return ctx.one / ctx.cos(z)
@defun_wrapped
def csc(ctx, z): return ctx.one / ctx.sin(z)
@defun_wrapped
def coth(ctx, z): return ctx.one / ctx.tanh(z)
@defun_wrapped
def sech(ctx, z): return ctx.one / ctx.cosh(z)
@defun_wrapped
def csch(ctx, z): return ctx.one / ctx.sinh(z)
@defun_wrapped
def acot(ctx, z): return ctx.atan(ctx.one / z)
@defun_wrapped
def asec(ctx, z): return ctx.acos(ctx.one / z)
@defun_wrapped
def acsc(ctx, z): return ctx.asin(ctx.one / z)
@defun_wrapped
def acoth(ctx, z): return ctx.atanh(ctx.one / z)
@defun_wrapped
def asech(ctx, z): return ctx.acosh(ctx.one / z)
@defun_wrapped
def acsch(ctx, z): return ctx.asinh(ctx.one / z)
@defun
def sign(ctx, x):
x = ctx.convert(x)
if not x or ctx.isnan(x):
return x
if ctx._is_real_type(x):
if x > 0:
return ctx.one
else:
return -ctx.one
return x / abs(x)
@defun
def agm(ctx, a, b=1):
if b == 1:
return ctx.agm1(a)
a = ctx.convert(a)
b = ctx.convert(b)
return ctx._agm(a, b)
@defun_wrapped
def sinc(ctx, x):
if ctx.isinf(x):
return 1/x
if not x:
return x+1
return ctx.sin(x)/x
@defun_wrapped
def sincpi(ctx, x):
if ctx.isinf(x):
return 1/x
if not x:
return x+1
return ctx.sinpi(x)/(ctx.pi*x)
# TODO: tests; improve implementation
@defun_wrapped
def expm1(ctx, x):
if not x:
return ctx.zero
# exp(x) - 1 ~ x
if ctx.mag(x) < -ctx.prec:
return x + 0.5*x**2
# TODO: accurately eval the smaller of the real/imag parts
return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
@defun_wrapped
def powm1(ctx, x, y):
mag = ctx.mag
one = ctx.one
w = x**y - one
M = mag(w)
# Only moderate cancellation
if M > -8:
return w
# Check for the only possible exact cases
if not w:
if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
return w
x1 = x - one
magy = mag(y)
lnx = ctx.ln(x)
# Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
if magy + mag(lnx) < -ctx.prec:
return lnx*y + (lnx*y)**2/2
# TODO: accurately eval the smaller of the real/imag part
return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
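# Hedged sketch (hypothetical helper, never called): expm1 and powm1 above
# protect against the catastrophic cancellation that the naive expressions
# exp(x)-1 and x**y-1 suffer for tiny arguments.
def _expm1_powm1_sketch():
    from mpmath import mp, expm1, powm1
    mp.dps = 15
    tiny = mp.mpf(2)**-60
    # exp(tiny) rounds to 1 at this precision, so exp(tiny)-1 would give 0
    assert expm1(tiny) > 0
    assert abs(expm1(tiny) - tiny) < tiny*1e-10
    x, y = 1 + mp.mpf(2)**-40, mp.mpf(2)**-40
    # x**y - 1 ~ y*log(x) when both factors are tiny
    assert abs(powm1(x, y) - y*mp.log(x)) < abs(y*mp.log(x))*1e-6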
@defun
def _rootof1(ctx, k, n):
k = int(k)
n = int(n)
k %= n
if not k:
return ctx.one
elif 2*k == n:
return -ctx.one
elif 4*k == n:
return ctx.j
elif 4*k == 3*n:
return -ctx.j
return ctx.expjpi(2*ctx.mpf(k)/n)
@defun
def root(ctx, x, n, k=0):
n = int(n)
x = ctx.convert(x)
if k:
# Special case: there is an exact real root
if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0):
return -ctx.root(-x, n)
# Multiply by root of unity
prec = ctx.prec
try:
ctx.prec += 10
v = ctx.root(x, n, 0) * ctx._rootof1(k, n)
finally:
ctx.prec = prec
return +v
return ctx._nthroot(x, n)
@defun
def unitroots(ctx, n, primitive=False):
gcd = ctx._gcd
prec = ctx.prec
try:
ctx.prec += 10
if primitive:
v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1]
else:
# TODO: this can be done *much* faster
v = [ctx._rootof1(k,n) for k in range(n)]
finally:
ctx.prec = prec
return [+x for x in v]
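# Hedged sketch for root/unitroots above (hypothetical helper, never called):
# root(x, n, k) is the principal n-th root rotated by the k-th root of unity,
# and unitroots(n, primitive=True) returns the phi(n) primitive roots.
def _roots_sketch():
    from mpmath import mp, root, unitroots
    mp.dps = 15
    assert abs(root(16, 4, 1) - 2j) < 1e-12         # 2 * exp(2*pi*i/4) = 2i
    assert len(unitroots(8, primitive=True)) == 4   # phi(8) = 4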
@defun
def arg(ctx, x):
x = ctx.convert(x)
re = ctx._re(x)
im = ctx._im(x)
return ctx.atan2(im, re)
@defun
def fabs(ctx, x):
return abs(ctx.convert(x))
@defun
def re(ctx, x):
x = ctx.convert(x)
if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers
return x.real
return x
@defun
def im(ctx, x):
x = ctx.convert(x)
if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers
return x.imag
return ctx.zero
@defun
def conj(ctx, x):
x = ctx.convert(x)
try:
return x.conjugate()
except AttributeError:
return x
@defun
def polar(ctx, z):
return (ctx.fabs(z), ctx.arg(z))
@defun_wrapped
def rect(ctx, r, phi):
return r * ctx.mpc(*ctx.cos_sin(phi))
@defun
def log(ctx, x, b=None):
if b is None:
return ctx.ln(x)
wp = ctx.prec + 20
return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp)
@defun
def log10(ctx, x):
return ctx.log(x, 10)
@defun
def fmod(ctx, x, y):
return ctx.convert(x) % ctx.convert(y)
@defun
def degrees(ctx, x):
return x / ctx.degree
@defun
def radians(ctx, x):
return x * ctx.degree
def _lambertw_special(ctx, z, k):
# W(0,0) = 0; all other branches are singular
if not z:
if not k:
return z
return ctx.ninf + z
if z == ctx.inf:
if k == 0:
return z
else:
return z + 2*k*ctx.pi*ctx.j
if z == ctx.ninf:
return (-z) + (2*k+1)*ctx.pi*ctx.j
# Some kind of nan or complex inf/nan?
return ctx.ln(z)
import math
import cmath
def _lambertw_approx_hybrid(z, k):
imag_sign = 0
if hasattr(z, "imag"):
x = float(z.real)
y = z.imag
if y:
imag_sign = (-1) ** (y < 0)
y = float(y)
else:
x = float(z)
y = 0.0
imag_sign = 0
# hack to work regardless of whether Python supports -0.0
if not y:
y = 0.0
z = complex(x,y)
if k == 0:
if -4.0 < y < 4.0 and -1.0 < x < 2.5:
if imag_sign:
# Taylor series in upper/lower half-plane
if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j))
if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j))
if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j))
if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j))
# Taylor series near -1
if x < -0.5:
if imag_sign >= 0:
return (-0.318+1.34j) + (-0.697-0.593j)*(z+1)
else:
return (-0.318-1.34j) + (-0.697+0.593j)*(z+1)
# return real type
r = -0.367879441171442
if (not imag_sign) and x > r:
z = x
# Singularity near -1/e
if x < -0.2:
return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
# Taylor series near 0
if x < 0.5: return z
# Simple linear approximation
return 0.2 + 0.3*z
if (not imag_sign) and x > 0.0:
L1 = math.log(x); L2 = math.log(L1)
else:
L1 = cmath.log(z); L2 = cmath.log(L1)
elif k == -1:
# return real type
r = -0.367879441171442
if (not imag_sign) and r < x < 0.0:
z = x
if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2:
return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
if (not imag_sign) and -0.2 <= x < 0.0:
L1 = math.log(-x)
return L1 - math.log(-L1)
else:
if imag_sign == -1 and (not y) and x < 0.0:
L1 = cmath.log(z) - 3.1415926535897932j
else:
L1 = cmath.log(z) - 6.2831853071795865j
L2 = cmath.log(L1)
return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2)
def _lambertw_series(ctx, z, k, tol):
"""
Return rough approximation for W_k(z) from an asymptotic series,
sufficiently accurate for the Halley iteration to converge to
the correct value.
"""
magz = ctx.mag(z)
if (-10 < magz < 900) and (-1000 < k < 1000):
# Near the branch point at -1/e
if magz < 1 and abs(z+0.36787944117144) < 0.05:
if k == 0 or (k == -1 and ctx._im(z) >= 0) or \
(k == 1 and ctx._im(z) < 0):
delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)])
cancellation = -ctx.mag(delta)
ctx.prec += cancellation
# Use series given in Corless et al.
p = ctx.sqrt(2*(ctx.e*z+1))
ctx.prec -= cancellation
u = {0:ctx.mpf(-1), 1:ctx.mpf(1)}
a = {0:ctx.mpf(2), 1:ctx.mpf(-1)}
if k != 0:
p = -p
s = ctx.zero
# The series converges, so we could use it directly, but unless
# *extremely* close, it is better to just use the first few
# terms to get a good approximation for the iteration
for l in xrange(max(2,cancellation)):
if l not in u:
a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l))
u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1)
term = u[l] * p**l
s += term
if ctx.mag(term) < -tol:
return s, True
l += 1
ctx.prec += cancellation//2
return s, False
if k == 0 or k == -1:
return _lambertw_approx_hybrid(z, k), False
if k == 0:
if magz < -1:
return z*(1-z), False
L1 = ctx.ln(z)
L2 = ctx.ln(L1)
elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0):
L1 = ctx.ln(-z)
return L1 - ctx.ln(-L1), False
else:
# This holds both as z -> 0 and z -> inf.
# Relative error is O(1/log(z)).
L1 = ctx.ln(z) + 2j*ctx.pi*k
L2 = ctx.ln(L1)
return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False
@defun
def lambertw(ctx, z, k=0):
z = ctx.convert(z)
k = int(k)
if not ctx.isnormal(z):
return _lambertw_special(ctx, z, k)
prec = ctx.prec
ctx.prec += 20 + ctx.mag(k or 1)
wp = ctx.prec
tol = wp - 5
w, done = _lambertw_series(ctx, z, k, tol)
if not done:
# Use Halley iteration to solve w*exp(w) = z
two = ctx.mpf(2)
        for i in xrange(100):
            ew = ctx.exp(w)
            wew = w*ew
            wewz = wew-z
            wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
            if ctx.mag(wn-w) <= ctx.mag(wn) - tol:
                w = wn
                break
            else:
                w = wn
        else:
            # the loop ran out of iterations without meeting the
            # convergence criterion
            ctx.warn("Lambert W iteration failed to converge for z = %s" % z)
ctx.prec = prec
return +w
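# Hedged sketch for lambertw above (hypothetical helper, never called):
# every branch satisfies the defining identity W_k(z)*exp(W_k(z)) = z.
def _lambertw_sketch():
    from mpmath import mp, lambertw, exp
    mp.dps = 25
    z = mp.mpc('0.5', '0.25')
    for k in (0, -1, 3):
        w = lambertw(z, k)
        assert abs(w*exp(w) - z) < 1e-20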
@defun_wrapped
def bell(ctx, n, x=1):
x = ctx.convert(x)
if not n:
if ctx.isnan(x):
return x
return type(x)(1)
if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n):
return x**n
if n == 1: return x
if n == 2: return x*(x+1)
if x == 0: return ctx.sincpi(n)
return _polyexp(ctx, n, x, True) / ctx.exp(x)
def _polyexp(ctx, n, x, extra=False):
def _terms():
if extra:
yield ctx.sincpi(n)
t = x
k = 1
while 1:
yield k**n * t
k += 1
t = t*x/k
return ctx.sum_accurately(_terms, check_step=4)
@defun_wrapped
def polyexp(ctx, s, z):
if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s):
return z**s
if z == 0: return z*s
if s == 0: return ctx.expm1(z)
if s == 1: return ctx.exp(z)*z
if s == 2: return ctx.exp(z)*z*(z+1)
return _polyexp(ctx, s, z)
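# Hedged sketch (hypothetical helper, never called): bell and polyexp above
# are tied together by Dobinski-type sums sum_{k>=1} k**n * x**k / k!.
def _bell_polyexp_sketch():
    from mpmath import mp, bell, polyexp, exp
    mp.dps = 15
    assert abs(bell(5) - 52) < 1e-12              # B_5 = 52
    assert abs(polyexp(2, 1) - 2*exp(1)) < 1e-12  # sum k**2/k! = 2e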
@defun_wrapped
def cyclotomic(ctx, n, z):
n = int(n)
assert n >= 0
p = ctx.one
if n == 0:
return p
if n == 1:
return z - p
if n == 2:
return z + p
# Use divisor product representation. Unfortunately, this sometimes
# includes singularities for roots of unity, which we have to cancel out.
# Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1).
a_prod = 1
b_prod = 1
num_zeros = 0
num_poles = 0
for d in range(1,n+1):
if not n % d:
w = ctx.moebius(n//d)
# Use powm1 because it is important that we get 0 only
# if it really is exactly 0
b = -ctx.powm1(z, d)
if b:
p *= b**w
else:
if w == 1:
a_prod *= d
num_zeros += 1
elif w == -1:
b_prod *= d
num_poles += 1
#print n, num_zeros, num_poles
if num_zeros:
if num_zeros > num_poles:
p *= 0
else:
p *= a_prod
p /= b_prod
return p
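# Hedged sketch (hypothetical helper, never called): Phi_6(z) = z**2 - z + 1,
# and at z = 1 the zero/pole cancellation in the divisor product above must
# kick in to give the finite value Phi_6(1) = 1.
def _cyclotomic_sketch():
    from mpmath import mp, cyclotomic
    mp.dps = 15
    z = mp.mpf(3)
    assert abs(cyclotomic(6, z) - (z**2 - z + 1)) < 1e-12
    assert abs(cyclotomic(6, 1) - 1) < 1e-12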
@defun
def mangoldt(ctx, n):
r"""
Evaluates the von Mangoldt function `\Lambda(n) = \log p`
if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise.
**Examples**
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> [mangoldt(n) for n in range(-2,3)]
[0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321]
>>> mangoldt(6)
0.0
>>> mangoldt(7)
1.945910149055313305105353
>>> mangoldt(8)
0.6931471805599453094172321
>>> fsum(mangoldt(n) for n in range(101))
94.04531122935739224600493
>>> fsum(mangoldt(n) for n in range(10001))
10013.39669326311478372032
"""
n = int(n)
if n < 2:
return ctx.zero
if n % 2 == 0:
# Must be a power of two
if n & (n-1) == 0:
return +ctx.ln2
else:
return ctx.zero
# TODO: the following could be generalized into a perfect
# power testing function
# ---
# Look for a small factor
for p in (3,5,7,11,13,17,19,23,29,31):
if not n % p:
q, r = n // p, 0
while q > 1:
q, r = divmod(q, p)
if r:
return ctx.zero
return ctx.ln(p)
if ctx.isprime(n):
return ctx.ln(n)
# Obviously, we could use arbitrary-precision arithmetic for this...
if n > 10**30:
raise NotImplementedError
k = 2
while 1:
p = int(n**(1./k) + 0.5)
if p < 2:
return ctx.zero
if p ** k == n:
if ctx.isprime(p):
return ctx.ln(p)
k += 1
#--------------------- end of mpmath/functions/functions.py -------------------#
from ..libmp.backend import xrange
from .functions import defun, defun_wrapped, defun_static
@defun
def stieltjes(ctx, n, a=1):
n = ctx.convert(n)
a = ctx.convert(a)
if n < 0:
return ctx.bad_domain("Stieltjes constants defined for n >= 0")
if hasattr(ctx, "stieltjes_cache"):
stieltjes_cache = ctx.stieltjes_cache
else:
stieltjes_cache = ctx.stieltjes_cache = {}
if a == 1:
if n == 0:
return +ctx.euler
if n in stieltjes_cache:
prec, s = stieltjes_cache[n]
if prec >= ctx.prec:
return +s
mag = 1
def f(x):
xa = x/a
v = (xa-ctx.j)*ctx.ln(a-ctx.j*x)**n/(1+xa**2)/(ctx.exp(2*ctx.pi*x)-1)
return ctx._re(v) / mag
orig = ctx.prec
try:
# Normalize integrand by approx. magnitude to
# speed up quadrature (which uses absolute error)
if n > 50:
ctx.prec = 20
mag = ctx.quad(f, [0,ctx.inf], maxdegree=3)
ctx.prec = orig + 10 + int(n**0.5)
s = ctx.quad(f, [0,ctx.inf], maxdegree=20)
v = ctx.ln(a)**n/(2*a) - ctx.ln(a)**(n+1)/(n+1) + 2*s/a*mag
finally:
ctx.prec = orig
if a == 1 and ctx.isint(n):
stieltjes_cache[n] = (ctx.prec, v)
return +v
@defun_wrapped
def siegeltheta(ctx, t, derivative=0):
d = int(derivative)
if (t == ctx.inf or t == ctx.ninf):
if d < 2:
if t == ctx.ninf and d == 0:
return ctx.ninf
return ctx.inf
else:
return ctx.zero
if d == 0:
if ctx._im(t):
# XXX: cancellation occurs
a = ctx.loggamma(0.25+0.5j*t)
b = ctx.loggamma(0.25-0.5j*t)
return -ctx.ln(ctx.pi)/2*t - 0.5j*(a-b)
else:
if ctx.isinf(t):
return t
return ctx._im(ctx.loggamma(0.25+0.5j*t)) - ctx.ln(ctx.pi)/2*t
if d > 0:
a = (-0.5j)**(d-1)*ctx.polygamma(d-1, 0.25-0.5j*t)
b = (0.5j)**(d-1)*ctx.polygamma(d-1, 0.25+0.5j*t)
if ctx._im(t):
if d == 1:
return -0.5*ctx.log(ctx.pi)+0.25*(a+b)
else:
return 0.25*(a+b)
else:
if d == 1:
return ctx._re(-0.5*ctx.log(ctx.pi)+0.25*(a+b))
else:
return ctx._re(0.25*(a+b))
@defun_wrapped
def grampoint(ctx, n):
# asymptotic expansion, from
# http://mathworld.wolfram.com/GramPoint.html
g = 2*ctx.pi*ctx.exp(1+ctx.lambertw((8*n+1)/(8*ctx.e)))
return ctx.findroot(lambda t: ctx.siegeltheta(t)-ctx.pi*n, g)
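# Hedged sketch (hypothetical helper, never called): the n-th Gram point
# returned above satisfies siegeltheta(g_n) = pi*n by definition.
def _grampoint_sketch():
    from mpmath import mp, grampoint, siegeltheta, pi
    mp.dps = 15
    g = grampoint(10)
    assert abs(siegeltheta(g) - 10*pi) < 1e-10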
@defun_wrapped
def siegelz(ctx, t, **kwargs):
d = int(kwargs.get("derivative", 0))
t = ctx.convert(t)
t1 = ctx._re(t)
t2 = ctx._im(t)
prec = ctx.prec
try:
if abs(t1) > 500*prec and t2**2 < t1:
v = ctx.rs_z(t, d)
if ctx._is_real_type(t):
return ctx._re(v)
return v
except NotImplementedError:
pass
ctx.prec += 21
e1 = ctx.expj(ctx.siegeltheta(t))
z = ctx.zeta(0.5+ctx.j*t)
if d == 0:
v = e1*z
ctx.prec=prec
if ctx._is_real_type(t):
return ctx._re(v)
return +v
z1 = ctx.zeta(0.5+ctx.j*t, derivative=1)
theta1 = ctx.siegeltheta(t, derivative=1)
if d == 1:
v = ctx.j*e1*(z1+z*theta1)
ctx.prec=prec
if ctx._is_real_type(t):
return ctx._re(v)
return +v
z2 = ctx.zeta(0.5+ctx.j*t, derivative=2)
theta2 = ctx.siegeltheta(t, derivative=2)
comb1 = theta1**2-ctx.j*theta2
if d == 2:
def terms():
return [2*z1*theta1, z2, z*comb1]
v = ctx.sum_accurately(terms, 1)
v = -e1*v
ctx.prec = prec
if ctx._is_real_type(t):
return ctx._re(v)
return +v
ctx.prec += 10
z3 = ctx.zeta(0.5+ctx.j*t, derivative=3)
theta3 = ctx.siegeltheta(t, derivative=3)
comb2 = theta1**3-3*ctx.j*theta1*theta2-theta3
if d == 3:
def terms():
return [3*theta1*z2, 3*z1*comb1, z3+z*comb2]
v = ctx.sum_accurately(terms, 1)
v = -ctx.j*e1*v
ctx.prec = prec
if ctx._is_real_type(t):
return ctx._re(v)
return +v
z4 = ctx.zeta(0.5+ctx.j*t, derivative=4)
theta4 = ctx.siegeltheta(t, derivative=4)
def terms():
return [theta1**4, -6*ctx.j*theta1**2*theta2, -3*theta2**2,
-4*theta1*theta3, ctx.j*theta4]
comb3 = ctx.sum_accurately(terms, 1)
if d == 4:
def terms():
return [6*theta1**2*z2, -6*ctx.j*z2*theta2, 4*theta1*z3,
4*z1*comb2, z4, z*comb3]
v = ctx.sum_accurately(terms, 1)
v = e1*v
ctx.prec = prec
if ctx._is_real_type(t):
return ctx._re(v)
return +v
if d > 4:
h = lambda x: ctx.siegelz(x, derivative=4)
return ctx.diff(h, t, n=d-4)
_zeta_zeros = [
14.134725142,21.022039639,25.010857580,30.424876126,32.935061588,
37.586178159,40.918719012,43.327073281,48.005150881,49.773832478,
52.970321478,56.446247697,59.347044003,60.831778525,65.112544048,
67.079810529,69.546401711,72.067157674,75.704690699,77.144840069,
79.337375020,82.910380854,84.735492981,87.425274613,88.809111208,
92.491899271,94.651344041,95.870634228,98.831194218,101.317851006,
103.725538040,105.446623052,107.168611184,111.029535543,111.874659177,
114.320220915,116.226680321,118.790782866,121.370125002,122.946829294,
124.256818554,127.516683880,129.578704200,131.087688531,133.497737203,
134.756509753,138.116042055,139.736208952,141.123707404,143.111845808,
146.000982487,147.422765343,150.053520421,150.925257612,153.024693811,
156.112909294,157.597591818,158.849988171,161.188964138,163.030709687,
165.537069188,167.184439978,169.094515416,169.911976479,173.411536520,
174.754191523,176.441434298,178.377407776,179.916484020,182.207078484,
184.874467848,185.598783678,187.228922584,189.416158656,192.026656361,
193.079726604,195.265396680,196.876481841,198.015309676,201.264751944,
202.493594514,204.189671803,205.394697202,207.906258888,209.576509717,
211.690862595,213.347919360,214.547044783,216.169538508,219.067596349,
220.714918839,221.430705555,224.007000255,224.983324670,227.421444280,
229.337413306,231.250188700,231.987235253,233.693404179,236.524229666,
]
def _load_zeta_zeros(url):
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib import urlopen           # Python 2
    d = urlopen(url)
    L = [float(x) for x in d.readlines()]
# Sanity check
assert round(L[0]) == 14
_zeta_zeros[:] = L
@defun
def oldzetazero(ctx, n, url='http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros1'):
n = int(n)
if n < 0:
return ctx.zetazero(-n).conjugate()
if n == 0:
raise ValueError("n must be nonzero")
if n > len(_zeta_zeros) and n <= 100000:
_load_zeta_zeros(url)
if n > len(_zeta_zeros):
raise NotImplementedError("n too large for zetazeros")
return ctx.mpc(0.5, ctx.findroot(ctx.siegelz, _zeta_zeros[n-1]))
@defun_wrapped
def riemannr(ctx, x):
if x == 0:
return ctx.zero
# Check if a simple asymptotic estimate is accurate enough
if abs(x) > 1000:
a = ctx.li(x)
b = 0.5*ctx.li(ctx.sqrt(x))
if abs(b) < abs(a)*ctx.eps:
return a
if abs(x) < 0.01:
# XXX
ctx.prec += int(-ctx.log(abs(x),2))
# Sum Gram's series
s = t = ctx.one
u = ctx.ln(x)
k = 1
while abs(t) > abs(s)*ctx.eps:
t = t * u / k
s += t / (k * ctx._zeta_int(k+1))
k += 1
return s
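# Editorial usage sketch (not part of the library): Gram's series gives a
# smooth approximation to the prime counting function; e.g. primepi(100)
# is exactly 25 while riemannr(100) is roughly 25.66.
def _riemannr_example():
    from mpmath import mp, riemannr, primepi
    mp.dps = 15
    return riemannr(100), primepi(100)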
@defun_static
def primepi(ctx, x):
x = int(x)
if x < 2:
return 0
return len(ctx.list_primes(x))
# TODO: fix the interface wrt contexts
@defun_wrapped
def primepi2(ctx, x):
x = int(x)
if x < 2:
return ctx._iv.zero
if x < 2657:
return ctx._iv.mpf(ctx.primepi(x))
mid = ctx.li(x)
# Schoenfeld's estimate for x >= 2657, assuming RH
err = ctx.sqrt(x,rounding='u')*ctx.ln(x,rounding='u')/8/ctx.pi(rounding='d')
a = ctx.floor((ctx._iv.mpf(mid)-err).a, rounding='d')
b = ctx.ceil((ctx._iv.mpf(mid)+err).b, rounding='u')
return ctx._iv.mpf([a,b])
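# Editorial usage sketch (not part of the library): the Schoenfeld bound
# holds under RH, so the returned interval should bracket the true count,
# e.g. primepi(10**6) == 78498.
def _primepi2_example():
    from mpmath import mp, primepi2
    mp.dps = 15
    v = primepi2(10**6)
    return v.a, v.b                # lower and upper bounds around 78498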
@defun_wrapped
def primezeta(ctx, s):
if ctx.isnan(s):
return s
if ctx.re(s) <= 0:
raise ValueError("prime zeta function defined only for re(s) > 0")
if s == 1:
return ctx.inf
if s == 0.5:
return ctx.mpc(ctx.ninf, ctx.pi)
r = ctx.re(s)
if r > ctx.prec:
return 0.5**s
else:
wp = ctx.prec + int(r)
def terms():
orig = ctx.prec
# zeta ~ 1+eps; need to set precision
# to get logarithm accurately
k = 0
while 1:
k += 1
u = ctx.moebius(k)
if not u:
continue
ctx.prec = wp
t = u*ctx.ln(ctx.zeta(k*s))/k
if not t:
return
#print ctx.prec, ctx.nstr(t)
ctx.prec = orig
yield t
return ctx.sum_accurately(terms)
# TODO: for bernpoly and eulerpoly, ensure that all exact zeros are covered
@defun_wrapped
def bernpoly(ctx, n, z):
# Slow implementation:
#return sum(ctx.binomial(n,k)*ctx.bernoulli(k)*z**(n-k) for k in xrange(0,n+1))
n = int(n)
if n < 0:
raise ValueError("Bernoulli polynomials only defined for n >= 0")
if z == 0 or (z == 1 and n > 1):
return ctx.bernoulli(n)
if z == 0.5:
return (ctx.ldexp(1,1-n)-1)*ctx.bernoulli(n)
if n <= 3:
if n == 0: return z ** 0
if n == 1: return z - 0.5
if n == 2: return (6*z*(z-1)+1)/6
if n == 3: return z*(z*(z-1.5)+0.5)
if ctx.isinf(z):
return z ** n
if ctx.isnan(z):
return z
if abs(z) > 2:
def terms():
t = ctx.one
yield t
r = ctx.one/z
k = 1
while k <= n:
t = t*(n+1-k)/k*r
if not (k > 2 and k & 1):
yield t*ctx.bernoulli(k)
k += 1
return ctx.sum_accurately(terms) * z**n
else:
def terms():
yield ctx.bernoulli(n)
t = ctx.one
k = 1
while k <= n:
t = t*(n+1-k)/k * z
m = n-k
if not (m > 2 and m & 1):
yield t*ctx.bernoulli(m)
k += 1
return ctx.sum_accurately(terms)
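# Editorial usage sketch (not part of the library): B_3(x) = x**3 -
# 1.5*x**2 + 0.5*x, so bernpoly(3, 2) should equal 8 - 6 + 1 = 3.
def _bernpoly_example():
    from mpmath import mp, bernpoly
    mp.dps = 15
    return bernpoly(3, 2)          # ~= 3.0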
@defun_wrapped
def eulerpoly(ctx, n, z):
n = int(n)
if n < 0:
raise ValueError("Euler polynomials only defined for n >= 0")
if n <= 2:
if n == 0: return z ** 0
if n == 1: return z - 0.5
if n == 2: return z*(z-1)
if ctx.isinf(z):
return z**n
if ctx.isnan(z):
return z
m = n+1
if z == 0:
return -2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0
if z == 1:
return 2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0
if z == 0.5:
if n % 2:
return ctx.zero
# Use exact code for Euler numbers
if n < 100 or n*ctx.mag(0.46839865*n) < ctx.prec*0.25:
return ctx.ldexp(ctx._eulernum(n), -n)
# http://functions.wolfram.com/Polynomials/EulerE2/06/01/02/01/0002/
def terms():
t = ctx.one
k = 0
w = ctx.ldexp(1,n+2)
while 1:
v = n-k+1
if not (v > 2 and v & 1):
yield (2-w)*ctx.bernoulli(v)*t
k += 1
if k > n:
break
t = t*z*(n-k+2)/k
w *= 0.5
return ctx.sum_accurately(terms) / m
@defun
def eulernum(ctx, n, exact=False):
n = int(n)
if exact:
return int(ctx._eulernum(n))
if n < 100:
return ctx.mpf(ctx._eulernum(n))
if n % 2:
return ctx.zero
return ctx.ldexp(ctx.eulerpoly(n,0.5), n)
# TODO: this should be implemented low-level
def polylog_series(ctx, s, z):
tol = +ctx.eps
l = ctx.zero
k = 1
zk = z
while 1:
term = zk / k**s
l += term
if abs(term) < tol:
break
zk *= z
k += 1
return l
def polylog_continuation(ctx, n, z):
if n < 0:
return z*0
twopij = 2j * ctx.pi
a = -twopij**n/ctx.fac(n) * ctx.bernpoly(n, ctx.ln(z)/twopij)
if ctx._is_real_type(z) and z < 0:
a = ctx._re(a)
if ctx._im(z) < 0 or (ctx._im(z) == 0 and ctx._re(z) >= 1):
a -= twopij*ctx.ln(z)**(n-1)/ctx.fac(n-1)
return a
def polylog_unitcircle(ctx, n, z):
tol = +ctx.eps
if n > 1:
l = ctx.zero
logz = ctx.ln(z)
logmz = ctx.one
m = 0
while 1:
if (n-m) != 1:
term = ctx.zeta(n-m) * logmz / ctx.fac(m)
if term and abs(term) < tol:
break
l += term
logmz *= logz
m += 1
l += ctx.ln(z)**(n-1)/ctx.fac(n-1)*(ctx.harmonic(n-1)-ctx.ln(-ctx.ln(z)))
elif n < 1: # else
l = ctx.fac(-n)*(-ctx.ln(z))**(n-1)
logz = ctx.ln(z)
logkz = ctx.one
k = 0
while 1:
b = ctx.bernoulli(k-n+1)
if b:
term = b*logkz/(ctx.fac(k)*(k-n+1))
if abs(term) < tol:
break
l -= term
logkz *= logz
k += 1
else:
raise ValueError
if ctx._is_real_type(z) and z < 0:
l = ctx._re(l)
return l
def polylog_general(ctx, s, z):
v = ctx.zero
u = ctx.ln(z)
if not abs(u) < 5: # theoretically |u| < 2*pi
raise NotImplementedError("polylog for arbitrary s and z")
t = 1
k = 0
while 1:
term = ctx.zeta(s-k) * t
if abs(term) < ctx.eps:
break
v += term
k += 1
t *= u
t /= k
return ctx.gamma(1-s)*(-u)**(s-1) + v
@defun_wrapped
def polylog(ctx, s, z):
s = ctx.convert(s)
z = ctx.convert(z)
if z == 1:
return ctx.zeta(s)
if z == -1:
return -ctx.altzeta(s)
if s == 0:
return z/(1-z)
if s == 1:
return -ctx.ln(1-z)
if s == -1:
return z/(1-z)**2
if abs(z) <= 0.75 or (not ctx.isint(s) and abs(z) < 0.9):
return polylog_series(ctx, s, z)
if abs(z) >= 1.4 and ctx.isint(s):
return (-1)**(s+1)*polylog_series(ctx, s, 1/z) + polylog_continuation(ctx, s, z)
if ctx.isint(s):
return polylog_unitcircle(ctx, int(s), z)
return polylog_general(ctx, s, z)
#raise NotImplementedError("polylog for arbitrary s and z")
# This could perhaps be used in some cases
#from quadrature import quad
#return quad(lambda t: t**(s-1)/(exp(t)/z-1),[0,inf])/gamma(s)
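# Editorial usage sketch (not part of the library): check the dispatcher
# against the classical identity Li_2(1/2) = pi**2/12 - ln(2)**2/2
# (both values ~= 0.5822405264650125).
def _polylog_example():
    from mpmath import mp, polylog, pi, ln
    mp.dps = 15
    return polylog(2, 0.5), pi**2/12 - ln(2)**2/2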
@defun_wrapped
def clsin(ctx, s, z, pi=False):
if ctx.isint(s) and s < 0 and int(s) % 2 == 1:
return z*0
if pi:
a = ctx.expjpi(z)
else:
a = ctx.expj(z)
if ctx._is_real_type(z) and ctx._is_real_type(s):
return ctx.im(ctx.polylog(s,a))
b = 1/a
return (-0.5j)*(ctx.polylog(s,a) - ctx.polylog(s,b))
@defun_wrapped
def clcos(ctx, s, z, pi=False):
if ctx.isint(s) and s < 0 and int(s) % 2 == 0:
return z*0
if pi:
a = ctx.expjpi(z)
else:
a = ctx.expj(z)
if ctx._is_real_type(z) and ctx._is_real_type(s):
return ctx.re(ctx.polylog(s,a))
b = 1/a
return 0.5*(ctx.polylog(s,a) + ctx.polylog(s,b))
@defun
def altzeta(ctx, s, **kwargs):
try:
return ctx._altzeta(s, **kwargs)
except NotImplementedError:
return ctx._altzeta_generic(s)
@defun_wrapped
def _altzeta_generic(ctx, s):
if s == 1:
return ctx.ln2 + 0*s
return -ctx.powm1(2, 1-s) * ctx.zeta(s)
@defun
def zeta(ctx, s, a=1, derivative=0, method=None, **kwargs):
d = int(derivative)
if a == 1 and not (d or method):
try:
return ctx._zeta(s, **kwargs)
except NotImplementedError:
pass
s = ctx.convert(s)
prec = ctx.prec
method = kwargs.get('method')
verbose = kwargs.get('verbose')
if a == 1 and method != 'euler-maclaurin':
im = abs(ctx._im(s))
re = abs(ctx._re(s))
#if (im < prec or method == 'borwein') and not derivative:
# try:
# if verbose:
# print "zeta: Attempting to use the Borwein algorithm"
# return ctx._zeta(s, **kwargs)
# except NotImplementedError:
# if verbose:
# print "zeta: Could not use the Borwein algorithm"
# pass
if abs(im) > 500*prec and 10*re < prec and derivative <= 4 or \
method == 'riemann-siegel':
try: # py2.4 compatible try block
try:
if verbose:
print("zeta: Attempting to use the Riemann-Siegel algorithm")
return ctx.rs_zeta(s, derivative, **kwargs)
except NotImplementedError:
if verbose:
print("zeta: Could not use the Riemann-Siegel algorithm")
pass
finally:
ctx.prec = prec
if s == 1:
return ctx.inf
abss = abs(s)
if abss == ctx.inf:
if ctx.re(s) == ctx.inf:
if d == 0:
return ctx.one
return ctx.zero
return s*0
elif ctx.isnan(abss):
return 1/s
if ctx.re(s) > 2*ctx.prec and a == 1 and not derivative:
return ctx.one + ctx.power(2, -s)
return +ctx._hurwitz(s, a, d, **kwargs)
@defun
def _hurwitz(ctx, s, a=1, d=0, **kwargs):
prec = ctx.prec
verbose = kwargs.get('verbose')
try:
extraprec = 10
ctx.prec += extraprec
# We strongly want to special-case rational a
a, atype = ctx._convert_param(a)
if ctx.re(s) < 0:
if verbose:
print("zeta: Attempting reflection formula")
try:
return _hurwitz_reflection(ctx, s, a, d, atype)
except NotImplementedError:
pass
if verbose:
print("zeta: Reflection formula failed")
if verbose:
print("zeta: Using the Euler-Maclaurin algorithm")
while 1:
ctx.prec = prec + extraprec
T1, T2 = _hurwitz_em(ctx, s, a, d, prec+10, verbose)
cancellation = ctx.mag(T1) - ctx.mag(T1+T2)
if verbose:
print("Term 1:", T1)
print("Term 2:", T2)
print("Cancellation:", cancellation, "bits")
if cancellation < extraprec:
return T1 + T2
else:
extraprec = max(2*extraprec, min(cancellation + 5, 100*prec))
if extraprec > kwargs.get('maxprec', 100*prec):
raise ctx.NoConvergence("zeta: too much cancellation")
finally:
ctx.prec = prec
def _hurwitz_reflection(ctx, s, a, d, atype):
# TODO: implement for derivatives
if d != 0:
raise NotImplementedError
res = ctx.re(s)
negs = -s
# Integer reflection formula
if ctx.isnpint(s):
n = int(res)
if n <= 0:
return ctx.bernpoly(1-n, a) / (n-1)
t = 1-s
# We now require a to be standardized
v = 0
shift = 0
b = a
while ctx.re(b) > 1:
b -= 1
v -= b**negs
shift -= 1
while ctx.re(b) <= 0:
v += b**negs
b += 1
shift += 1
# Rational reflection formula
if atype == 'Q' or atype == 'Z':
            try:
                p, q = a._mpq_
            except AttributeError:
                assert a == int(a)
                p = int(a)
                q = 1
p += shift*q
assert 1 <= p <= q
g = ctx.fsum(ctx.cospi(t/2-2*k*b)*ctx._hurwitz(t,(k,q)) \
for k in range(1,q+1))
g *= 2*ctx.gamma(t)/(2*ctx.pi*q)**t
v += g
return v
# General reflection formula
# Note: clcos/clsin can raise NotImplementedError
else:
C1, C2 = ctx.cospi_sinpi(0.5*t)
# Clausen functions; could maybe use polylog directly
if C1: C1 *= ctx.clcos(t, 2*a, pi=True)
if C2: C2 *= ctx.clsin(t, 2*a, pi=True)
v += 2*ctx.gamma(t)/(2*ctx.pi)**t*(C1+C2)
return v
def _hurwitz_em(ctx, s, a, d, prec, verbose):
# May not be converted at this point
a = ctx.convert(a)
tol = -prec
# Estimate number of terms for Euler-Maclaurin summation; could be improved
M1 = 0
M2 = prec // 3
N = M2
lsum = 0
# This speeds up the recurrence for derivatives
if ctx.isint(s):
s = int(ctx._re(s))
s1 = s-1
while 1:
# Truncated L-series
l = ctx._zetasum(s, M1+a, M2-M1-1, [d])[0][0]
#if d:
# l = ctx.fsum((-ctx.ln(n+a))**d * (n+a)**negs for n in range(M1,M2))
#else:
# l = ctx.fsum((n+a)**negs for n in range(M1,M2))
lsum += l
M2a = M2+a
logM2a = ctx.ln(M2a)
logM2ad = logM2a**d
logs = [logM2ad]
logr = 1/logM2a
rM2a = 1/M2a
M2as = rM2a**s
if d:
tailsum = ctx.gammainc(d+1, s1*logM2a) / s1**(d+1)
else:
tailsum = 1/((s1)*(M2a)**s1)
tailsum += 0.5 * logM2ad * M2as
U = [1]
r = M2as
fact = 2
for j in range(1, N+1):
# TODO: the following could perhaps be tidied a bit
j2 = 2*j
if j == 1:
upds = [1]
else:
upds = [j2-2, j2-1]
for m in upds:
D = min(m,d+1)
if m <= d:
logs.append(logs[-1] * logr)
Un = [0]*(D+1)
for i in xrange(D): Un[i] = (1-m-s)*U[i]
for i in xrange(1,D+1): Un[i] += (d-(i-1))*U[i-1]
U = Un
r *= rM2a
t = ctx.fdot(U, logs) * r * ctx.bernoulli(j2)/(-fact)
tailsum += t
if ctx.mag(t) < tol:
return lsum, (-1)**d * tailsum
fact *= (j2+1)*(j2+2)
if verbose:
print("Sum range:", M1, M2, "term magnitude", ctx.mag(t), "tolerance", tol)
M1, M2 = M2, M2*2
if ctx.re(s) < 0:
N += N//2
@defun
def _zetasum(ctx, s, a, n, derivatives=[0], reflect=False):
"""
Returns [xd0,xd1,...,xdr], [yd0,yd1,...ydr] where
xdk = D^k ( 1/a^s + 1/(a+1)^s + ... + 1/(a+n)^s )
ydk = D^k conj( 1/a^(1-s) + 1/(a+1)^(1-s) + ... + 1/(a+n)^(1-s) )
D^k = kth derivative with respect to s, k ranges over the given list of
derivatives (which should consist of either a single element
    or a range 0,1,...,r). If reflect=False, the ydks are not computed.
"""
#print "zetasum", s, a, n
try:
return ctx._zetasum_fast(s, a, n, derivatives, reflect)
except NotImplementedError:
pass
negs = ctx.fneg(s, exact=True)
have_derivatives = derivatives != [0]
have_one_derivative = len(derivatives) == 1
if not reflect:
if not have_derivatives:
return [ctx.fsum((a+k)**negs for k in xrange(n+1))], []
if have_one_derivative:
d = derivatives[0]
x = ctx.fsum(ctx.ln(a+k)**d * (a+k)**negs for k in xrange(n+1))
return [(-1)**d * x], []
maxd = max(derivatives)
if not have_one_derivative:
derivatives = range(maxd+1)
xs = [ctx.zero for d in derivatives]
if reflect:
ys = [ctx.zero for d in derivatives]
else:
ys = []
for k in xrange(n+1):
w = a + k
xterm = w ** negs
if reflect:
yterm = ctx.conj(ctx.one / (w * xterm))
if have_derivatives:
logw = -ctx.ln(w)
if have_one_derivative:
logw = logw ** maxd
xs[0] += xterm * logw
if reflect:
ys[0] += yterm * logw
else:
t = ctx.one
for d in derivatives:
xs[d] += xterm * t
if reflect:
ys[d] += yterm * t
t *= logw
else:
xs[0] += xterm
if reflect:
ys[0] += yterm
return xs, ys
@defun
def dirichlet(ctx, s, chi=[1], derivative=0):
s = ctx.convert(s)
q = len(chi)
d = int(derivative)
if d > 2:
raise NotImplementedError("arbitrary order derivatives")
prec = ctx.prec
try:
ctx.prec += 10
if s == 1:
have_pole = True
for x in chi:
if x and x != 1:
have_pole = False
h = +ctx.eps
ctx.prec *= 2*(d+1)
s += h
if have_pole:
return +ctx.inf
z = ctx.zero
for p in range(1,q+1):
if chi[p%q]:
if d == 1:
z += chi[p%q] * (ctx.zeta(s, (p,q), 1) - \
ctx.zeta(s, (p,q))*ctx.log(q))
else:
z += chi[p%q] * ctx.zeta(s, (p,q))
z /= q**s
finally:
ctx.prec = prec
return +z
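# Editorial usage sketch (not part of the library): for the nontrivial
# character mod 4, L(2, chi) is Catalan's constant (~= 0.9159655941).
def _dirichlet_example():
    from mpmath import mp, dirichlet, catalan
    mp.dps = 15
    return dirichlet(2, [0, 1, 0, -1]), +catalan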
def secondzeta_main_term(ctx, s, a, **kwargs):
tol = ctx.eps
f = lambda n: ctx.gammainc(0.5*s, a*gamm**2, regularized=True)*gamm**(-s)
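    # Note: f ignores its argument; it reads the current zero ordinate
    # `gamm` from the enclosing scope, which the loop below updates
    # before each call of f.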
totsum = term = ctx.zero
mg = ctx.inf
n = 0
while mg > tol:
totsum += term
n += 1
gamm = ctx.im(ctx.zetazero_memoized(n))
term = f(n)
mg = abs(term)
err = 0
if kwargs.get("error"):
sg = ctx.re(s)
err = 0.5*ctx.pi**(-1)*max(1,sg)*a**(sg-0.5)*ctx.log(gamm/(2*ctx.pi))*\
ctx.gammainc(-0.5, a*gamm**2)/abs(ctx.gamma(s/2))
err = abs(err)
return +totsum, err, n
def secondzeta_prime_term(ctx, s, a, **kwargs):
tol = ctx.eps
f = lambda n: ctx.gammainc(0.5*(1-s),0.25*ctx.log(n)**2 * a**(-1))*\
((0.5*ctx.log(n))**(s-1))*ctx.mangoldt(n)/ctx.sqrt(n)/\
(2*ctx.gamma(0.5*s)*ctx.sqrt(ctx.pi))
totsum = term = ctx.zero
mg = ctx.inf
n = 1
while mg > tol or n < 9:
totsum += term
n += 1
term = f(n)
if term == 0:
mg = ctx.inf
else:
mg = abs(term)
if kwargs.get("error"):
err = mg
return +totsum, err, n
def secondzeta_exp_term(ctx, s, a):
if ctx.isint(s) and ctx.re(s) <= 0:
m = int(round(ctx.re(s)))
if not m & 1:
return ctx.mpf('-0.25')**(-m//2)
tol = ctx.eps
f = lambda n: (0.25*a)**n/((n+0.5*s)*ctx.fac(n))
totsum = ctx.zero
term = f(0)
mg = ctx.inf
n = 0
while mg > tol:
totsum += term
n += 1
term = f(n)
mg = abs(term)
v = a**(0.5*s)*totsum/ctx.gamma(0.5*s)
return v
def secondzeta_singular_term(ctx, s, a, **kwargs):
factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s))
extraprec = ctx.mag(factor)
ctx.prec += extraprec
factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s))
tol = ctx.eps
f = lambda n: ctx.bernpoly(n,0.75)*(4*ctx.sqrt(a))**n*\
ctx.gamma(0.5*n)/((s+n-1)*ctx.fac(n))
totsum = ctx.zero
mg1 = ctx.inf
n = 1
term = f(n)
mg2 = abs(term)
while mg2 > tol and mg2 <= mg1:
totsum += term
n += 1
term = f(n)
totsum += term
n +=1
term = f(n)
mg1 = mg2
mg2 = abs(term)
totsum += term
pole = -2*(s-1)**(-2)+(ctx.euler+ctx.log(16*ctx.pi**2*a))*(s-1)**(-1)
st = factor*(pole+totsum)
err = 0
if kwargs.get("error"):
if not ((mg2 > tol) and (mg2 <= mg1)):
if mg2 <= tol:
err = ctx.mpf(10)**int(ctx.log(abs(factor*tol),10))
if mg2 > mg1:
err = ctx.mpf(10)**int(ctx.log(abs(factor*mg1),10))
err = max(err, ctx.eps*1.)
ctx.prec -= extraprec
return +st, err
@defun
def secondzeta(ctx, s, a = 0.015, **kwargs):
r"""
Evaluates the secondary zeta function `Z(s)`, defined for
`\mathrm{Re}(s)>1` by
.. math ::
Z(s) = \sum_{n=1}^{\infty} \frac{1}{\tau_n^s}
where `\frac12+i\tau_n` runs through the zeros of `\zeta(s)` with
imaginary part positive.
    `Z(s)` extends to a meromorphic function on `\mathbb{C}` with a
    double pole at `s=1` and simple poles at the negative odd
    integers `s = -1, -3, -5, \ldots` (see the examples below).
**Examples**
>>> from mpmath import *
>>> mp.pretty = True; mp.dps = 15
>>> secondzeta(2)
0.023104993115419
>>> xi = lambda s: 0.5*s*(s-1)*pi**(-0.5*s)*gamma(0.5*s)*zeta(s)
>>> Xi = lambda t: xi(0.5+t*j)
>>> -0.5*diff(Xi,0,n=2)/Xi(0)
(0.023104993115419 + 0.0j)
We may ask for an approximate error value::
>>> secondzeta(0.5+100j, error=True)
((-0.216272011276718 - 0.844952708937228j), 2.22044604925031e-16)
The function has poles at the negative odd integers,
and dyadic rational values at the negative even integers::
>>> mp.dps = 30
>>> secondzeta(-8)
-0.67236328125
>>> secondzeta(-7)
+inf
**Implementation notes**
    The function is computed as a sum of four terms,
    `Z(s)=A(s)-P(s)+E(s)-S(s)`: respectively the main, prime,
    exponential and singular terms.
    The main term `A(s)` is computed from the zeros of zeta.
    The prime term depends on the von Mangoldt function.
    The singular term is responsible for the poles of the function.
    The four terms depend on a small parameter `a`. Theoretically,
    changing `a` has no effect on their sum, but in practice the
    choice matters for accuracy and speed.
    A smaller value of `a` makes `A(s)` depend on fewer zeros of
    zeta, but makes `P(s)` use more values of the von Mangoldt
    function.
    Passing ``verbose=True`` prints data about the values of the
    four terms.
>>> mp.dps = 10
>>> secondzeta(0.5 + 40j, error=True, verbose=True)
main term = (-30190318549.138656312556 - 13964804384.624622876523j)
computed using 19 zeros of zeta
prime term = (132717176.89212754625045 + 188980555.17563978290601j)
computed using 9 values of the von Mangoldt function
exponential term = (542447428666.07179812536 + 362434922978.80192435203j)
singular term = (512124392939.98154322355 + 348281138038.65531023921j)
((0.059471043 + 0.3463514534j), 1.455191523e-11)
>>> secondzeta(0.5 + 40j, a=0.04, error=True, verbose=True)
main term = (-151962888.19606243907725 - 217930683.90210294051982j)
computed using 9 zeros of zeta
prime term = (2476659342.3038722372461 + 28711581821.921627163136j)
computed using 37 values of the von Mangoldt function
exponential term = (178506047114.7838188264 + 819674143244.45677330576j)
singular term = (175877424884.22441310708 + 790744630738.28669174871j)
((0.059471043 + 0.3463514534j), 1.455191523e-11)
    Notice the large cancellation between the four terms. Changing `a`
    makes the individual terms very different numbers, but the
    cancellation still produces the correct value of `Z(s)`.
**References**
A. Voros, Zeta functions for the Riemann zeros, Ann. Institute Fourier,
53, (2003) 665--699.
A. Voros, Zeta functions over Zeros of Zeta Functions, Lecture Notes
of the Unione Matematica Italiana, Springer, 2009.
"""
s = ctx.convert(s)
a = ctx.convert(a)
tol = ctx.eps
if ctx.isint(s) and ctx.re(s) <= 1:
if abs(s-1) < tol*1000:
return ctx.inf
m = int(round(ctx.re(s)))
if m & 1:
return ctx.inf
else:
return ((-1)**(-m//2)*\
ctx.fraction(8-ctx.eulernum(-m,exact=True),2**(-m+3)))
prec = ctx.prec
try:
t3 = secondzeta_exp_term(ctx, s, a)
extraprec = max(ctx.mag(t3),0)
ctx.prec += extraprec + 3
        t1, r1, gt = secondzeta_main_term(ctx, s, a, error=True)
        t2, r2, pt = secondzeta_prime_term(ctx, s, a, error=True)
        t4, r4 = secondzeta_singular_term(ctx, s, a, error=True)
t3 = secondzeta_exp_term(ctx, s, a)
err = r1+r2+r4
t = t1-t2+t3-t4
if kwargs.get("verbose"):
print('main term =', t1)
print(' computed using', gt, 'zeros of zeta')
print('prime term =', t2)
print(' computed using', pt, 'values of the von Mangoldt function')
print('exponential term =', t3)
print('singular term =', t4)
finally:
ctx.prec = prec
if kwargs.get("error"):
w = max(ctx.mag(abs(t)),0)
err = max(err*2**w, ctx.eps*1.*2**w)
return +t, err
return +t
@defun_wrapped
def lerchphi(ctx, z, s, a):
r"""
Gives the Lerch transcendent, defined for `|z| < 1` and
`\Re{a} > 0` by
.. math ::
\Phi(z,s,a) = \sum_{k=0}^{\infty} \frac{z^k}{(a+k)^s}
and generally by the recurrence `\Phi(z,s,a) = z \Phi(z,s,a+1) + a^{-s}`
along with the integral representation valid for `\Re{a} > 0`
.. math ::
        \Phi(z,s,a) = \frac{1}{2 a^s} +
        \int_0^{\infty} \frac{z^t}{(a+t)^s} dt -
        2 \int_0^{\infty} \frac{\sin(t \log z - s
        \operatorname{arctan}(t/a))}{(a^2 + t^2)^{s/2}
        (e^{2 \pi t}-1)} dt.
The Lerch transcendent generalizes the Hurwitz zeta function :func:`zeta`
(`z = 1`) and the polylogarithm :func:`polylog` (`a = 1`).
**Examples**
Several evaluations in terms of simpler functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> lerchphi(-1,2,0.5); 4*catalan
3.663862376708876060218414
3.663862376708876060218414
>>> diff(lerchphi, (-1,-2,1), (0,1,0)); 7*zeta(3)/(4*pi**2)
0.2131391994087528954617607
0.2131391994087528954617607
>>> lerchphi(-4,1,1); log(5)/4
0.4023594781085250936501898
0.4023594781085250936501898
>>> lerchphi(-3+2j,1,0.5); 2*atanh(sqrt(-3+2j))/sqrt(-3+2j)
(1.142423447120257137774002 + 0.2118232380980201350495795j)
(1.142423447120257137774002 + 0.2118232380980201350495795j)
Evaluation works for complex arguments and `|z| \ge 1`::
>>> lerchphi(1+2j, 3-j, 4+2j)
(0.002025009957009908600539469 + 0.003327897536813558807438089j)
>>> lerchphi(-2,2,-2.5)
-12.28676272353094275265944
>>> lerchphi(10,10,10)
(-4.462130727102185701817349e-11 + 1.575172198981096218823481e-12j)
>>> lerchphi(10,10,-10.5)
(112658784011940.5605789002 + 498113185.5756221777743631j)
Some degenerate cases::
>>> lerchphi(0,1,2)
0.5
>>> lerchphi(0,1,-2)
-0.5
**References**
1. [DLMF]_ section 25.14
"""
if z == 0:
return a ** (-s)
"""
# Faster, but these cases are useful for testing right now
if z == 1:
return ctx.zeta(s, a)
if a == 1:
return z * ctx.polylog(s, z)
"""
if ctx.re(a) < 1:
if ctx.isnpint(a):
raise ValueError("Lerch transcendent complex infinity")
m = int(ctx.ceil(1-ctx.re(a)))
v = ctx.zero
zpow = ctx.one
for n in xrange(m):
v += zpow / (a+n)**s
zpow *= z
return zpow * ctx.lerchphi(z,s, a+m) + v
g = ctx.ln(z)
v = 1/(2*a**s) + ctx.gammainc(1-s, -a*g) * (-g)**(s-1) / z**a
h = s / 2
r = 2*ctx.pi
f = lambda t: ctx.sin(s*ctx.atan(t/a)-t*g) / \
((a**2+t**2)**h * ctx.expm1(r*t))
v += 2*ctx.quad(f, [0, ctx.inf])
if not ctx.im(z) and not ctx.im(s) and not ctx.im(a) and ctx.re(z) < 1:
v = ctx.chop(v)
return v
# ==== end of file: AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/functions/zeta.py ====
import math
from bisect import bisect
from .backend import xrange
from .backend import BACKEND, gmpy, sage, sage_utils, MPZ, MPZ_ONE, MPZ_ZERO
def giant_steps(start, target, n=2):
"""
Return a list of integers ~=
[start, n*start, ..., target/n^2, target/n, target]
but conservatively rounded so that the quotient between two
successive elements is actually slightly less than n.
With n = 2, this describes suitable precision steps for a
quadratically convergent algorithm such as Newton's method;
with n = 3 steps for cubic convergence (Halley's method), etc.
>>> giant_steps(50,1000)
[66, 128, 253, 502, 1000]
>>> giant_steps(50,1000,4)
[65, 252, 1000]
"""
L = [target]
while L[-1] > start*n:
L = L + [L[-1]//n + 2]
return L[::-1]
def rshift(x, n):
"""For an integer x, calculate x >> n with the fastest (floor)
rounding. Unlike the plain Python expression (x >> n), n is
allowed to be negative, in which case a left shift is performed."""
if n >= 0: return x >> n
else: return x << (-n)
def lshift(x, n):
"""For an integer x, calculate x << n. Unlike the plain Python
expression (x << n), n is allowed to be negative, in which case a
right shift with default (floor) rounding is performed."""
if n >= 0: return x << n
else: return x >> (-n)
if BACKEND == 'sage':
import operator
rshift = operator.rshift
lshift = operator.lshift
def python_trailing(n):
"""Count the number of trailing zero bits in abs(n)."""
if not n:
return 0
t = 0
while not n & 1:
n >>= 1
t += 1
return t
if BACKEND == 'gmpy':
if gmpy.version() >= '2':
def gmpy_trailing(n):
"""Count the number of trailing zero bits in abs(n) using gmpy."""
if n: return MPZ(n).bit_scan1()
else: return 0
else:
def gmpy_trailing(n):
"""Count the number of trailing zero bits in abs(n) using gmpy."""
if n: return MPZ(n).scan1()
else: return 0
# Small powers of 2
powers = [1<<_ for _ in range(300)]
def python_bitcount(n):
"""Calculate bit size of the nonnegative integer n."""
bc = bisect(powers, n)
if bc != 300:
return bc
bc = int(math.log(n, 2)) - 4
return bc + bctable[n>>bc]
def gmpy_bitcount(n):
"""Calculate bit size of the nonnegative integer n."""
if n: return MPZ(n).numdigits(2)
else: return 0
#def sage_bitcount(n):
# if n: return MPZ(n).nbits()
# else: return 0
def sage_trailing(n):
return MPZ(n).trailing_zero_bits()
if BACKEND == 'gmpy':
bitcount = gmpy_bitcount
trailing = gmpy_trailing
elif BACKEND == 'sage':
sage_bitcount = sage_utils.bitcount
bitcount = sage_bitcount
trailing = sage_trailing
else:
bitcount = python_bitcount
trailing = python_trailing
if BACKEND == 'gmpy' and 'bit_length' in dir(gmpy):
bitcount = gmpy.bit_length
# Used to avoid slow function calls as far as possible
trailtable = [trailing(n) for n in range(256)]
bctable = [bitcount(n) for n in range(1024)]
# TODO: speed up for bases 2, 4, 8, 16, ...
def bin_to_radix(x, xbits, base, bdigits):
"""Changes radix of a fixed-point number; i.e., converts
x * 2**xbits to floor(x * 10**bdigits)."""
return x * (MPZ(base)**bdigits) >> xbits
stddigits = '0123456789abcdefghijklmnopqrstuvwxyz'
def small_numeral(n, base=10, digits=stddigits):
"""Return the string numeral of a positive integer in an arbitrary
base. Most efficient for small input."""
if base == 10:
return str(n)
digs = []
while n:
n, digit = divmod(n, base)
digs.append(digits[digit])
return "".join(digs[::-1])
def numeral_python(n, base=10, size=0, digits=stddigits):
"""Represent the integer n as a string of digits in the given base.
Recursive division is used to make this function about 3x faster
than Python's str() for converting integers to decimal strings.
    The 'size' parameter specifies the number of digits in n; this
number is only used to determine splitting points and need not be
exact."""
if n <= 0:
if not n:
return "0"
return "-" + numeral(-n, base, size, digits)
# Fast enough to do directly
if size < 250:
return small_numeral(n, base, digits)
# Divide in half
half = (size // 2) + (size & 1)
A, B = divmod(n, base**half)
ad = numeral(A, base, half, digits)
bd = numeral(B, base, half, digits).rjust(half, "0")
return ad + bd
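# Example: numeral(255, base=16) == 'ff'. For huge n the recursive split
# above halves the digit-count parameter at every level.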
def numeral_gmpy(n, base=10, size=0, digits=stddigits):
"""Represent the integer n as a string of digits in the given base.
Recursive division is used to make this function about 3x faster
than Python's str() for converting integers to decimal strings.
    The 'size' parameter specifies the number of digits in n; this
number is only used to determine splitting points and need not be
exact."""
if n < 0:
return "-" + numeral(-n, base, size, digits)
# gmpy.digits() may cause a segmentation fault when trying to convert
# extremely large values to a string. The size limit may need to be
# adjusted on some platforms, but 1500000 works on Windows and Linux.
if size < 1500000:
return gmpy.digits(n, base)
# Divide in half
half = (size // 2) + (size & 1)
A, B = divmod(n, MPZ(base)**half)
ad = numeral(A, base, half, digits)
bd = numeral(B, base, half, digits).rjust(half, "0")
return ad + bd
if BACKEND == "gmpy":
numeral = numeral_gmpy
else:
numeral = numeral_python
_1_800 = 1<<800
_1_600 = 1<<600
_1_400 = 1<<400
_1_200 = 1<<200
_1_100 = 1<<100
_1_50 = 1<<50
def isqrt_small_python(x):
"""
Correctly (floor) rounded integer square root, using
division. Fast up to ~200 digits.
"""
if not x:
return x
if x < _1_800:
# Exact with IEEE double precision arithmetic
if x < _1_50:
return int(x**0.5)
# Initial estimate can be any integer >= the true root; round up
r = int(x**0.5 * 1.00000000000001) + 1
else:
bc = bitcount(x)
n = bc//2
r = int((x>>(2*n-100))**0.5+2)<<(n-50) # +2 is to round up
# The following iteration now precisely computes floor(sqrt(x))
# See e.g. Crandall & Pomerance, "Prime Numbers: A Computational
# Perspective"
while 1:
y = (r+x//r)>>1
if y >= r:
return r
r = y
def isqrt_fast_python(x):
"""
Fast approximate integer square root, computed using division-free
Newton iteration for large x. For random integers the result is almost
always correct (floor(sqrt(x))), but is 1 ulp too small with a roughly
0.1% probability. If x is very close to an exact square, the answer is
1 ulp wrong with high probability.
With 0 guard bits, the largest error over a set of 10^5 random
inputs of size 1-10^5 bits was 3 ulp. The use of 10 guard bits
almost certainly guarantees a max 1 ulp error.
"""
# Use direct division-based iteration if sqrt(x) < 2^400
# Assume floating-point square root accurate to within 1 ulp, then:
# 0 Newton iterations good to 52 bits
# 1 Newton iterations good to 104 bits
# 2 Newton iterations good to 208 bits
# 3 Newton iterations good to 416 bits
if x < _1_800:
y = int(x**0.5)
if x >= _1_100:
y = (y + x//y) >> 1
if x >= _1_200:
y = (y + x//y) >> 1
if x >= _1_400:
y = (y + x//y) >> 1
return y
bc = bitcount(x)
guard_bits = 10
x <<= 2*guard_bits
bc += 2*guard_bits
bc += (bc&1)
hbc = bc//2
startprec = min(50, hbc)
# Newton iteration for 1/sqrt(x), with floating-point starting value
r = int(2.0**(2*startprec) * (x >> (bc-2*startprec)) ** -0.5)
pp = startprec
for p in giant_steps(startprec, hbc):
# r**2, scaled from real size 2**(-bc) to 2**p
r2 = (r*r) >> (2*pp - p)
# x*r**2, scaled from real size ~1.0 to 2**p
xr2 = ((x >> (bc-p)) * r2) >> p
# New value of r, scaled from real size 2**(-bc/2) to 2**p
r = (r * ((3<<p) - xr2)) >> (pp+1)
pp = p
# (1/sqrt(x))*x = sqrt(x)
return (r*(x>>hbc)) >> (p+guard_bits)
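# Editorial sanity-check sketch (not part of the library): compare the
# fast approximate root against the exact floor square root defined
# below; the difference should be at most a few ulps.
def _isqrt_fast_check(trials=1000):
    import random
    random.seed(1)
    worst = 0
    for _ in range(trials):
        x = random.getrandbits(random.randint(1, 2000)) + 1
        worst = max(worst, abs(isqrt_python(x) - isqrt_fast_python(x)))
    return worst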
def sqrtrem_python(x):
"""Correctly rounded integer (floor) square root with remainder."""
# to check cutoff:
# plot(lambda x: timing(isqrt, 2**int(x)), [0,2000])
if x < _1_600:
y = isqrt_small_python(x)
return y, x - y*y
y = isqrt_fast_python(x) + 1
rem = x - y*y
# Correct remainder
    while rem < 0:
        y -= 1
        rem += (1+2*y)
    if rem:
        while rem > 2*(1+y):
            y += 1
            rem -= (1+2*y)
return y, rem
def isqrt_python(x):
"""Integer square root with correct (floor) rounding."""
return sqrtrem_python(x)[0]
def sqrt_fixed(x, prec):
return isqrt_fast(x<<prec)
sqrt_fixed2 = sqrt_fixed
if BACKEND == 'gmpy':
isqrt_small = isqrt_fast = isqrt = gmpy.sqrt
sqrtrem = gmpy.sqrtrem
elif BACKEND == 'sage':
isqrt_small = isqrt_fast = isqrt = \
getattr(sage_utils, "isqrt", lambda n: MPZ(n).isqrt())
sqrtrem = lambda n: MPZ(n).sqrtrem()
else:
isqrt_small = isqrt_small_python
isqrt_fast = isqrt_fast_python
isqrt = isqrt_python
sqrtrem = sqrtrem_python
def ifib(n, _cache={}):
"""Computes the nth Fibonacci number as an integer, for
integer n."""
if n < 0:
return (-1)**(-n+1) * ifib(-n)
if n in _cache:
return _cache[n]
m = n
# Use Dijkstra's logarithmic algorithm
# The following implementation is basically equivalent to
# http://en.literateprograms.org/Fibonacci_numbers_(Scheme)
a, b, p, q = MPZ_ONE, MPZ_ZERO, MPZ_ZERO, MPZ_ONE
while n:
if n & 1:
aq = a*q
a, b = b*q+aq+a*p, b*p+aq
n -= 1
else:
qq = q*q
p, q = p*p+qq, qq+2*p*q
n >>= 1
if m < 250:
_cache[m] = b
return b
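# Example: ifib(10) == 55, and ifib(-8) == -21, matching the extension
# F(-n) = (-1)**(n+1) * F(n) implemented above.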
MAX_FACTORIAL_CACHE = 1000
def ifac(n, memo={0:1, 1:1}):
"""Return n factorial (for integers n >= 0 only)."""
f = memo.get(n)
if f:
return f
k = len(memo)
p = memo[k-1]
MAX = MAX_FACTORIAL_CACHE
while k <= n:
p *= k
if k <= MAX:
memo[k] = p
k += 1
return p
def ifac2(n, memo_pair=[{0:1}, {1:1}]):
"""Return n!! (double factorial), integers n >= 0 only."""
memo = memo_pair[n&1]
f = memo.get(n)
if f:
return f
k = max(memo)
p = memo[k]
MAX = MAX_FACTORIAL_CACHE
while k < n:
k += 2
p *= k
if k <= MAX:
memo[k] = p
return p
if BACKEND == 'gmpy':
ifac = gmpy.fac
elif BACKEND == 'sage':
ifac = lambda n: int(sage.factorial(n))
ifib = sage.fibonacci
def list_primes(n):
n = n + 1
sieve = list(xrange(n))
sieve[:2] = [0, 0]
for i in xrange(2, int(n**0.5)+1):
if sieve[i]:
for j in xrange(i**2, n, i):
sieve[j] = 0
return [p for p in sieve if p]
if BACKEND == 'sage':
# Note: it is *VERY* important for performance that we convert
# the list to Python ints.
def list_primes(n):
return [int(_) for _ in sage.primes(n+1)]
small_odd_primes = (3,5,7,11,13,17,19,23,29,31,37,41,43,47)
small_odd_primes_set = set(small_odd_primes)
def isprime(n):
"""
Determines whether n is a prime number. A probabilistic test is
performed if n is very large. No special trick is used for detecting
perfect powers.
>>> sum(list_primes(100000))
454396537
>>> sum(n*isprime(n) for n in range(100000))
454396537
"""
n = int(n)
if not n & 1:
return n == 2
if n < 50:
return n in small_odd_primes_set
for p in small_odd_primes:
if not n % p:
return False
m = n-1
s = trailing(m)
d = m >> s
def test(a):
x = pow(a,d,n)
if x == 1 or x == m:
return True
for r in xrange(1,s):
x = x**2 % n
if x == m:
return True
return False
# See http://primes.utm.edu/prove/prove2_3.html
if n < 1373653:
witnesses = [2,3]
elif n < 341550071728321:
witnesses = [2,3,5,7,11,13,17]
else:
witnesses = small_odd_primes
for a in witnesses:
if not test(a):
return False
return True
def moebius(n):
"""
Evaluates the Moebius function which is `mu(n) = (-1)^k` if `n`
is a product of `k` distinct primes and `mu(n) = 0` otherwise.
TODO: speed up using factorization
"""
n = abs(int(n))
if n < 2:
return n
factors = []
for p in xrange(2, n+1):
if not (n % p):
if not (n % p**2):
return 0
if not sum(p % f for f in factors):
factors.append(p)
return (-1)**len(factors)
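# Example: moebius(30) == -1 (30 = 2*3*5 has three distinct prime
# factors) and moebius(12) == 0 (12 is divisible by the square 4).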
def gcd(*args):
a = 0
for b in args:
if a:
while b:
a, b = b, a % b
else:
a = b
return a
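# Example: gcd(12, 18, 27) == 3. With no arguments the function returns
# 0, which acts as the identity element for gcd.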
# Comment by Juan Arias de Reyna:
#
# I learned this method for computing EulerE[2n] from van de Lune.
#
# We apply the formula EulerE[2n] = (-1)^n 2**(-2n) sum_{j=0}^n a(2n,2j+1)
#
# where the numbers a(n,j) vanish for j > n+1 or j <= -1 and satisfy
#
# a(0,-1) = a(0,0) = 0; a(0,1) = 1; a(0,2) = a(0,3) = 0
#
# a(n,j) = a(n-1,j)                              when n+j is even
# a(n,j) = (j-1) a(n-1,j-1) + (j+1) a(n-1,j+1)   when n+j is odd
#
# But we can use a single one-dimensional array a(j), since to compute
# a(n,j) we only need to know a(n-1,k) for k of parity opposite to j,
# and we do not need to preserve the values that have already been used.
#
# We cache the values of the Euler numbers up to a sufficiently high order.
#
# Important observation: if we intend to use the numbers
# EulerE[1], EulerE[2], ..., EulerE[n],
# it is convenient to compute EulerE[n] first, since the algorithm
# computes all the previous ones along the way and keeps them in the cache.
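# A minimal editorial sketch of the recurrence above, using plain Python
# ints (the production code in eulernum() below uses MPZ values and a
# cache). _eulerE_even_sketch(3) should return [1, -1, 5, -61].
def _eulerE_even_sketch(nmax):
    a = [0, 0, 1, 0, 0, 0]          # a(0,j) stored at index j+1
    out = [1]                       # EulerE[0] = 1
    for n in range(1, 2*nmax+1):
        for j in range(n+1, -1, -2):
            a[j+1] = (j-1)*a[j] + (j+1)*a[j+2]
        a.append(0)
        if not n & 1:               # odd-index Euler numbers vanish
            suma = sum(a[k+1] for k in range(n+1, -1, -2))
            out.append((-1)**(n//2) * suma // 2**n)
    return out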
MAX_EULER_CACHE = 500
def eulernum(m, _cache={0:MPZ_ONE}):
r"""
Computes the Euler numbers `E(n)`, which can be defined as
coefficients of the Taylor expansion of `1/cosh x`:
.. math ::
\frac{1}{\cosh x} = \sum_{n=0}^\infty \frac{E_n}{n!} x^n
Example::
>>> [int(eulernum(n)) for n in range(11)]
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521]
>>> [int(eulernum(n)) for n in range(11)] # test cache
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521]
"""
# for odd m > 1, the Euler numbers are zero
if m & 1:
return MPZ_ZERO
f = _cache.get(m)
if f:
return f
MAX = MAX_EULER_CACHE
n = m
a = [MPZ(_) for _ in [0,0,1,0,0,0]]
for n in range(1, m+1):
for j in range(n+1, -1, -2):
a[j+1] = (j-1)*a[j] + (j+1)*a[j+2]
a.append(0)
suma = 0
for k in range(n+1, -1, -2):
suma += a[k+1]
if n <= MAX:
_cache[n] = ((-1)**(n//2))*(suma // 2**n)
if n == m:
return ((-1)**(n//2))*suma // 2**n
# ==== end of file: AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/libintmath.py ====
import sys
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, BACKEND
from .libmpf import (\
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast, bitcount,
bctable, normalize, normalize1, reciprocal_rnd, rshift, lshift, giant_steps,
negative_rnd,
to_str, to_fixed, from_man_exp, from_float, to_float, from_int, to_int,
fzero, fone, ftwo, fhalf, finf, fninf, fnan, fnone,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul,
mpf_div, mpf_mul_int, mpf_shift, mpf_sqrt, mpf_hypot,
mpf_rdiv_int, mpf_floor, mpf_ceil, mpf_nint, mpf_frac,
mpf_sign, mpf_hash,
ComplexResult
)
from .libelefun import (\
mpf_pi, mpf_exp, mpf_log, mpf_cos_sin, mpf_cosh_sinh, mpf_tan, mpf_pow_int,
mpf_log_hypot,
mpf_cos_sin_pi, mpf_phi,
mpf_cos, mpf_sin, mpf_cos_pi, mpf_sin_pi,
mpf_atan, mpf_atan2, mpf_cosh, mpf_sinh, mpf_tanh,
mpf_asin, mpf_acos, mpf_acosh, mpf_nthroot, mpf_fibonacci
)
# An mpc value is a (real, imag) tuple
mpc_one = fone, fzero
mpc_zero = fzero, fzero
mpc_two = ftwo, fzero
mpc_half = (fhalf, fzero)
_infs = (finf, fninf)
_infs_nan = (finf, fninf, fnan)
def mpc_is_inf(z):
"""Check if either real or imaginary part is infinite"""
re, im = z
if re in _infs: return True
if im in _infs: return True
return False
def mpc_is_infnan(z):
"""Check if either real or imaginary part is infinite or nan"""
re, im = z
if re in _infs_nan: return True
if im in _infs_nan: return True
return False
def mpc_to_str(z, dps, **kwargs):
re, im = z
    rs = to_str(re, dps, **kwargs)
if im[0]:
return rs + " - " + to_str(mpf_neg(im), dps, **kwargs) + "j"
else:
return rs + " + " + to_str(im, dps, **kwargs) + "j"
def mpc_to_complex(z, strict=False):
re, im = z
return complex(to_float(re, strict), to_float(im, strict))
def mpc_hash(z):
    if sys.version_info >= (3, 2):
re, im = z
h = mpf_hash(re) + sys.hash_info.imag * mpf_hash(im)
        # Need to reduce modulo either 2^32 or 2^64
h = h % (2**sys.hash_info.width)
return int(h)
else:
try:
return hash(mpc_to_complex(z, strict=True))
except OverflowError:
return hash(z)
def mpc_conjugate(z, prec, rnd=round_fast):
re, im = z
return re, mpf_neg(im, prec, rnd)
def mpc_is_nonzero(z):
return z != mpc_zero
def mpc_add(z, w, prec, rnd=round_fast):
a, b = z
c, d = w
return mpf_add(a, c, prec, rnd), mpf_add(b, d, prec, rnd)
def mpc_add_mpf(z, x, prec, rnd=round_fast):
a, b = z
return mpf_add(a, x, prec, rnd), b
def mpc_sub(z, w, prec=0, rnd=round_fast):
a, b = z
c, d = w
return mpf_sub(a, c, prec, rnd), mpf_sub(b, d, prec, rnd)
def mpc_sub_mpf(z, p, prec=0, rnd=round_fast):
a, b = z
return mpf_sub(a, p, prec, rnd), b
def mpc_pos(z, prec, rnd=round_fast):
a, b = z
return mpf_pos(a, prec, rnd), mpf_pos(b, prec, rnd)
def mpc_neg(z, prec=None, rnd=round_fast):
a, b = z
return mpf_neg(a, prec, rnd), mpf_neg(b, prec, rnd)
def mpc_shift(z, n):
a, b = z
return mpf_shift(a, n), mpf_shift(b, n)
def mpc_abs(z, prec, rnd=round_fast):
"""Absolute value of a complex number, |a+bi|.
Returns an mpf value."""
a, b = z
return mpf_hypot(a, b, prec, rnd)
def mpc_arg(z, prec, rnd=round_fast):
"""Argument of a complex number. Returns an mpf value."""
a, b = z
return mpf_atan2(b, a, prec, rnd)
def mpc_floor(z, prec, rnd=round_fast):
a, b = z
return mpf_floor(a, prec, rnd), mpf_floor(b, prec, rnd)
def mpc_ceil(z, prec, rnd=round_fast):
a, b = z
return mpf_ceil(a, prec, rnd), mpf_ceil(b, prec, rnd)
def mpc_nint(z, prec, rnd=round_fast):
a, b = z
return mpf_nint(a, prec, rnd), mpf_nint(b, prec, rnd)
def mpc_frac(z, prec, rnd=round_fast):
a, b = z
return mpf_frac(a, prec, rnd), mpf_frac(b, prec, rnd)
def mpc_mul(z, w, prec, rnd=round_fast):
"""
Complex multiplication.
Returns the real and imaginary part of (a+bi)*(c+di), rounded to
the specified precision. The rounding mode applies to the real and
imaginary parts separately.
"""
a, b = z
c, d = w
p = mpf_mul(a, c)
q = mpf_mul(b, d)
r = mpf_mul(a, d)
s = mpf_mul(b, c)
re = mpf_sub(p, q, prec, rnd)
im = mpf_add(r, s, prec, rnd)
return re, im
def mpc_square(z, prec, rnd=round_fast):
# (a+b*I)**2 == a**2 - b**2 + 2*I*a*b
a, b = z
p = mpf_mul(a,a)
q = mpf_mul(b,b)
r = mpf_mul(a,b, prec, rnd)
re = mpf_sub(p, q, prec, rnd)
im = mpf_shift(r, 1)
return re, im
def mpc_mul_mpf(z, p, prec, rnd=round_fast):
a, b = z
re = mpf_mul(a, p, prec, rnd)
im = mpf_mul(b, p, prec, rnd)
return re, im
def mpc_mul_imag_mpf(z, x, prec, rnd=round_fast):
"""
Multiply the mpc value z by I*x where x is an mpf value.
"""
a, b = z
re = mpf_neg(mpf_mul(b, x, prec, rnd))
im = mpf_mul(a, x, prec, rnd)
return re, im
def mpc_mul_int(z, n, prec, rnd=round_fast):
a, b = z
re = mpf_mul_int(a, n, prec, rnd)
im = mpf_mul_int(b, n, prec, rnd)
return re, im
def mpc_div(z, w, prec, rnd=round_fast):
a, b = z
c, d = w
wp = prec + 10
# mag = c*c + d*d
mag = mpf_add(mpf_mul(c, c), mpf_mul(d, d), wp)
# (a*c+b*d)/mag, (b*c-a*d)/mag
t = mpf_add(mpf_mul(a,c), mpf_mul(b,d), wp)
u = mpf_sub(mpf_mul(b,c), mpf_mul(a,d), wp)
return mpf_div(t,mag,prec,rnd), mpf_div(u,mag,prec,rnd)
def mpc_div_mpf(z, p, prec, rnd=round_fast):
"""Calculate z/p where p is real"""
a, b = z
re = mpf_div(a, p, prec, rnd)
im = mpf_div(b, p, prec, rnd)
return re, im
def mpc_reciprocal(z, prec, rnd=round_fast):
"""Calculate 1/z efficiently"""
a, b = z
m = mpf_add(mpf_mul(a,a),mpf_mul(b,b),prec+10)
re = mpf_div(a, m, prec, rnd)
im = mpf_neg(mpf_div(b, m, prec, rnd))
return re, im
def mpc_mpf_div(p, z, prec, rnd=round_fast):
"""Calculate p/z where p is real efficiently"""
a, b = z
m = mpf_add(mpf_mul(a,a),mpf_mul(b,b), prec+10)
re = mpf_div(mpf_mul(a,p), m, prec, rnd)
im = mpf_div(mpf_neg(mpf_mul(b,p)), m, prec, rnd)
return re, im
def complex_int_pow(a, b, n):
"""Complex integer power: computes (a+b*I)**n exactly for
nonnegative n (a and b must be Python ints)."""
wre = 1
wim = 0
while n:
if n & 1:
wre, wim = wre*a - wim*b, wim*a + wre*b
n -= 1
a, b = a*a - b*b, 2*a*b
n //= 2
return wre, wim
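# Example: complex_int_pow(1, 1, 4) == (-4, 0), i.e. (1+i)**4 == -4.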
def mpc_pow(z, w, prec, rnd=round_fast):
if w[1] == fzero:
return mpc_pow_mpf(z, w[0], prec, rnd)
return mpc_exp(mpc_mul(mpc_log(z, prec+10), w, prec+10), prec, rnd)
def mpc_pow_mpf(z, p, prec, rnd=round_fast):
psign, pman, pexp, pbc = p
if pexp >= 0:
return mpc_pow_int(z, (-1)**psign * (pman<<pexp), prec, rnd)
if pexp == -1:
sqrtz = mpc_sqrt(z, prec+10)
return mpc_pow_int(sqrtz, (-1)**psign * pman, prec, rnd)
return mpc_exp(mpc_mul_mpf(mpc_log(z, prec+10), p, prec+10), prec, rnd)
def mpc_pow_int(z, n, prec, rnd=round_fast):
a, b = z
if b == fzero:
return mpf_pow_int(a, n, prec, rnd), fzero
if a == fzero:
v = mpf_pow_int(b, n, prec, rnd)
n %= 4
if n == 0:
return v, fzero
elif n == 1:
return fzero, v
elif n == 2:
return mpf_neg(v), fzero
elif n == 3:
return fzero, mpf_neg(v)
if n == 0: return mpc_one
if n == 1: return mpc_pos(z, prec, rnd)
if n == 2: return mpc_square(z, prec, rnd)
if n == -1: return mpc_reciprocal(z, prec, rnd)
if n < 0: return mpc_reciprocal(mpc_pow_int(z, -n, prec+4), prec, rnd)
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if asign: aman = -aman
if bsign: bman = -bman
de = aexp - bexp
abs_de = abs(de)
exact_size = n*(abs_de + max(abc, bbc))
if exact_size < 10000:
if de > 0:
aman <<= de
aexp = bexp
else:
bman <<= (-de)
bexp = aexp
re, im = complex_int_pow(aman, bman, n)
re = from_man_exp(re, int(n*aexp), prec, rnd)
im = from_man_exp(im, int(n*bexp), prec, rnd)
return re, im
return mpc_exp(mpc_mul_int(mpc_log(z, prec+10), n, prec+10), prec, rnd)
def mpc_sqrt(z, prec, rnd=round_fast):
"""Complex square root (principal branch).
We have sqrt(a+bi) = sqrt((r+a)/2) + b/sqrt(2*(r+a))*i where
r = abs(a+bi), when a+bi is not a negative real number."""
a, b = z
if b == fzero:
if a == fzero:
return (a, b)
# When a+bi is a negative real number, we get a real sqrt times i
if a[0]:
im = mpf_sqrt(mpf_neg(a), prec, rnd)
return (fzero, im)
else:
re = mpf_sqrt(a, prec, rnd)
return (re, fzero)
wp = prec+20
if not a[0]: # case a positive
t = mpf_add(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) + a
u = mpf_shift(t, -1) # u = t/2
re = mpf_sqrt(u, prec, rnd) # re = sqrt(u)
v = mpf_shift(t, 1) # v = 2*t
w = mpf_sqrt(v, wp) # w = sqrt(v)
im = mpf_div(b, w, prec, rnd) # im = b / w
else: # case a negative
t = mpf_sub(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) - a
u = mpf_shift(t, -1) # u = t/2
im = mpf_sqrt(u, prec, rnd) # im = sqrt(u)
v = mpf_shift(t, 1) # v = 2*t
w = mpf_sqrt(v, wp) # w = sqrt(v)
re = mpf_div(b, w, prec, rnd) # re = b/w
if b[0]:
re = mpf_neg(re)
im = mpf_neg(im)
return re, im
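# Editorial usage sketch (not part of the library): sqrt(3+4i) == 2+i,
# using the raw (real, imag) tuple representation of this module.
def _mpc_sqrt_example():
    z = (from_int(3), from_int(4))
    re, im = mpc_sqrt(z, 53)
    return to_float(re), to_float(im)   # -> (2.0, 1.0)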
def mpc_nthroot_fixed(a, b, n, prec):
# a, b signed integers at fixed precision prec
start = 50
a1 = int(rshift(a, prec - n*start))
b1 = int(rshift(b, prec - n*start))
try:
r = (a1 + 1j * b1)**(1.0/n)
re = r.real
im = r.imag
re = MPZ(int(re))
im = MPZ(int(im))
except OverflowError:
a1 = from_int(a1, start)
b1 = from_int(b1, start)
fn = from_int(n)
nth = mpf_rdiv_int(1, fn, start)
re, im = mpc_pow((a1, b1), (nth, fzero), start)
re = to_int(re)
im = to_int(im)
extra = 10
prevp = start
extra1 = n
for p in giant_steps(start, prec+extra):
# this is slow for large n, unlike int_pow_fixed
re2, im2 = complex_int_pow(re, im, n-1)
re2 = rshift(re2, (n-1)*prevp - p - extra1)
im2 = rshift(im2, (n-1)*prevp - p - extra1)
r4 = (re2*re2 + im2*im2) >> (p + extra1)
ap = rshift(a, prec - p)
bp = rshift(b, prec - p)
rec = (ap * re2 + bp * im2) >> p
imc = (-ap * im2 + bp * re2) >> p
reb = (rec << p) // r4
imb = (imc << p) // r4
re = (reb + (n-1)*lshift(re, p-prevp))//n
im = (imb + (n-1)*lshift(im, p-prevp))//n
prevp = p
return re, im
def mpc_nthroot(z, n, prec, rnd=round_fast):
"""
Complex n-th root.
Use Newton method as in the real case when it is faster,
otherwise use z**(1/n)
"""
a, b = z
if a[0] == 0 and b == fzero:
re = mpf_nthroot(a, n, prec, rnd)
return (re, fzero)
if n < 2:
if n == 0:
return mpc_one
if n == 1:
return mpc_pos((a, b), prec, rnd)
if n == -1:
return mpc_div(mpc_one, (a, b), prec, rnd)
inverse = mpc_nthroot((a, b), -n, prec+5, reciprocal_rnd[rnd])
return mpc_div(mpc_one, inverse, prec, rnd)
if n <= 20:
prec2 = int(1.2 * (prec + 10))
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
pf = mpc_abs((a,b), prec)
if pf[-2] + pf[-1] > -10 and pf[-2] + pf[-1] < prec:
af = to_fixed(a, prec2)
bf = to_fixed(b, prec2)
re, im = mpc_nthroot_fixed(af, bf, n, prec2)
extra = 10
re = from_man_exp(re, -prec2-extra, prec2, rnd)
im = from_man_exp(im, -prec2-extra, prec2, rnd)
return re, im
fn = from_int(n)
prec2 = prec+10 + 10
nth = mpf_rdiv_int(1, fn, prec2)
re, im = mpc_pow((a, b), (nth, fzero), prec2, rnd)
re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
return re, im
def mpc_cbrt(z, prec, rnd=round_fast):
"""
Complex cubic root.
"""
return mpc_nthroot(z, 3, prec, rnd)
def mpc_exp(z, prec, rnd=round_fast):
"""
Complex exponential function.
We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i)
for the computation. This formula is very nice because it is
    perfectly stable; since we just do real multiplications, the only
numerical errors that can creep in are single-ulp rounding errors.
The formula is efficient since mpmath's real exp is quite fast and
since we can compute cos and sin simultaneously.
It is no problem if a and b are large; if the implementations of
exp/cos/sin are accurate and efficient for all real numbers, then
so is this function for all complex numbers.
"""
a, b = z
if a == fzero:
return mpf_cos_sin(b, prec, rnd)
if b == fzero:
return mpf_exp(a, prec, rnd), fzero
mag = mpf_exp(a, prec+4, rnd)
c, s = mpf_cos_sin(b, prec+4, rnd)
re = mpf_mul(mag, c, prec, rnd)
im = mpf_mul(mag, s, prec, rnd)
return re, im
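# Editorial usage sketch (not part of the library): exp(pi*i) == -1 up
# to a rounding-level imaginary residue, illustrating the formula above.
def _mpc_exp_example():
    z = (fzero, mpf_pi(53))
    re, im = mpc_exp(z, 53)
    return to_float(re), to_float(im)   # -> (-1.0, ~1.2e-16)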
def mpc_log(z, prec, rnd=round_fast):
re = mpf_log_hypot(z[0], z[1], prec, rnd)
im = mpc_arg(z, prec, rnd)
return re, im
def mpc_cos(z, prec, rnd=round_fast):
"""Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) -
sin(a)*sinh(b)*i.
    The same comments apply as for the complex exp: only real
    multiplications are performed, so no cancellation errors are
    possible. The formula is also efficient since we can compute both
    pairs (cos, sin) and (cosh, sinh) in single steps."""
a, b = z
if b == fzero:
return mpf_cos(a, prec, rnd), fzero
if a == fzero:
return mpf_cosh(b, prec, rnd), fzero
wp = prec + 6
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(c, ch, prec, rnd)
im = mpf_mul(s, sh, prec, rnd)
return re, mpf_neg(im)
def mpc_sin(z, prec, rnd=round_fast):
"""Complex sine. We have sin(a+bi) = sin(a)*cosh(b) +
cos(a)*sinh(b)*i. See the docstring for mpc_cos for additional
comments."""
a, b = z
if b == fzero:
return mpf_sin(a, prec, rnd), fzero
if a == fzero:
return fzero, mpf_sinh(b, prec, rnd)
wp = prec + 6
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(s, ch, prec, rnd)
im = mpf_mul(c, sh, prec, rnd)
return re, im
def mpc_tan(z, prec, rnd=round_fast):
"""Complex tangent. Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i
where M = cos(2a) + cosh(2b)."""
a, b = z
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if b == fzero: return mpf_tan(a, prec, rnd), fzero
if a == fzero: return fzero, mpf_tanh(b, prec, rnd)
wp = prec + 15
a = mpf_shift(a, 1)
b = mpf_shift(b, 1)
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
# TODO: handle cancellation when c ~= -1 and ch ~= 1
mag = mpf_add(c, ch, wp)
re = mpf_div(s, mag, prec, rnd)
im = mpf_div(sh, mag, prec, rnd)
return re, im
def mpc_cos_pi(z, prec, rnd=round_fast):
a, b = z
if b == fzero:
return mpf_cos_pi(a, prec, rnd), fzero
b = mpf_mul(b, mpf_pi(prec+5), prec+5)
if a == fzero:
return mpf_cosh(b, prec, rnd), fzero
wp = prec + 6
c, s = mpf_cos_sin_pi(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(c, ch, prec, rnd)
im = mpf_mul(s, sh, prec, rnd)
return re, mpf_neg(im)
def mpc_sin_pi(z, prec, rnd=round_fast):
a, b = z
if b == fzero:
return mpf_sin_pi(a, prec, rnd), fzero
b = mpf_mul(b, mpf_pi(prec+5), prec+5)
if a == fzero:
return fzero, mpf_sinh(b, prec, rnd)
wp = prec + 6
c, s = mpf_cos_sin_pi(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(s, ch, prec, rnd)
im = mpf_mul(c, sh, prec, rnd)
return re, im
def mpc_cos_sin(z, prec, rnd=round_fast):
a, b = z
if a == fzero:
ch, sh = mpf_cosh_sinh(b, prec, rnd)
return (ch, fzero), (fzero, sh)
if b == fzero:
c, s = mpf_cos_sin(a, prec, rnd)
return (c, fzero), (s, fzero)
wp = prec + 6
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
cre = mpf_mul(c, ch, prec, rnd)
cim = mpf_mul(s, sh, prec, rnd)
sre = mpf_mul(s, ch, prec, rnd)
sim = mpf_mul(c, sh, prec, rnd)
return (cre, mpf_neg(cim)), (sre, sim)
def mpc_cos_sin_pi(z, prec, rnd=round_fast):
a, b = z
if b == fzero:
c, s = mpf_cos_sin_pi(a, prec, rnd)
return (c, fzero), (s, fzero)
b = mpf_mul(b, mpf_pi(prec+5), prec+5)
if a == fzero:
ch, sh = mpf_cosh_sinh(b, prec, rnd)
return (ch, fzero), (fzero, sh)
wp = prec + 6
c, s = mpf_cos_sin_pi(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
cre = mpf_mul(c, ch, prec, rnd)
cim = mpf_mul(s, sh, prec, rnd)
sre = mpf_mul(s, ch, prec, rnd)
sim = mpf_mul(c, sh, prec, rnd)
return (cre, mpf_neg(cim)), (sre, sim)
def mpc_cosh(z, prec, rnd=round_fast):
"""Complex hyperbolic cosine. Computed as cosh(z) = cos(z*i)."""
a, b = z
return mpc_cos((b, mpf_neg(a)), prec, rnd)
def mpc_sinh(z, prec, rnd=round_fast):
"""Complex hyperbolic sine. Computed as sinh(z) = -i*sin(z*i)."""
a, b = z
b, a = mpc_sin((b, a), prec, rnd)
return a, b
def mpc_tanh(z, prec, rnd=round_fast):
"""Complex hyperbolic tangent. Computed as tanh(z) = -i*tan(z*i)."""
a, b = z
b, a = mpc_tan((b, a), prec, rnd)
return a, b
# TODO: avoid loss of accuracy
def mpc_atan(z, prec, rnd=round_fast):
a, b = z
# atan(z) = (I/2)*(log(1-I*z) - log(1+I*z))
# x = 1-I*z = 1 + b - I*a
# y = 1+I*z = 1 - b + I*a
wp = prec + 15
x = mpf_add(fone, b, wp), mpf_neg(a)
y = mpf_sub(fone, b, wp), a
l1 = mpc_log(x, wp)
l2 = mpc_log(y, wp)
a, b = mpc_sub(l1, l2, prec, rnd)
# (I/2) * (a+b*I) = (-b/2 + a/2*I)
v = mpf_neg(mpf_shift(b,-1)), mpf_shift(a,-1)
# Subtraction at infinity gives correct real part but
# wrong imaginary part (should be zero)
if v[1] == fnan and mpc_is_inf(z):
v = (v[0], fzero)
return v
beta_crossover = from_float(0.6417)
alpha_crossover = from_float(1.5)
def acos_asin(z, prec, rnd, n):
""" complex acos for n = 0, asin for n = 1
The algorithm is described in
T.E. Hull, T.F. Fairgrieve and P.T.P. Tang
'Implementing the Complex Arcsine and Arcosine Functions
using Exception Handling',
ACM Trans. on Math. Software Vol. 23 (1997), p299
The complex acos and asin can be defined as
    acos(z) = acos(beta) - I*sign(b)* log(alpha + sqrt(alpha**2 -1))
    asin(z) = asin(beta) + I*sign(b)* log(alpha + sqrt(alpha**2 -1))
    where z = a + I*b
    alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha
    r = sqrt((a+1)**2 + b**2); s = sqrt((a-1)**2 + b**2)
These expressions are rewritten in different ways in different
regions, delimited by two crossovers alpha_crossover and beta_crossover,
and by abs(a) <= 1, in order to improve the numerical accuracy.
"""
a, b = z
wp = prec + 10
# special cases with real argument
if b == fzero:
am = mpf_sub(fone, mpf_abs(a), wp)
# case abs(a) <= 1
if not am[0]:
if n == 0:
return mpf_acos(a, prec, rnd), fzero
else:
return mpf_asin(a, prec, rnd), fzero
# cases abs(a) > 1
else:
# case a < -1
if a[0]:
pi = mpf_pi(prec, rnd)
c = mpf_acosh(mpf_neg(a), prec, rnd)
if n == 0:
return pi, mpf_neg(c)
else:
return mpf_neg(mpf_shift(pi, -1)), c
# case a > 1
else:
c = mpf_acosh(a, prec, rnd)
if n == 0:
return fzero, c
else:
pi = mpf_pi(prec, rnd)
return mpf_shift(pi, -1), mpf_neg(c)
asign = bsign = 0
if a[0]:
a = mpf_neg(a)
asign = 1
if b[0]:
b = mpf_neg(b)
bsign = 1
am = mpf_sub(fone, a, wp)
ap = mpf_add(fone, a, wp)
r = mpf_hypot(ap, b, wp)
s = mpf_hypot(am, b, wp)
alpha = mpf_shift(mpf_add(r, s, wp), -1)
beta = mpf_div(a, alpha, wp)
b2 = mpf_mul(b,b, wp)
# case beta <= beta_crossover
if not mpf_sub(beta_crossover, beta, wp)[0]:
if n == 0:
re = mpf_acos(beta, wp)
else:
re = mpf_asin(beta, wp)
else:
# to compute the real part in this region use the identity
# asin(beta) = atan(beta/sqrt(1-beta**2))
        # beta/sqrt(1-beta**2) = a/sqrt((alpha + a)*(alpha - a))
        # alpha + a is numerically accurate; alpha - a can have
        # cancellations leading to numerical inaccuracies, so rewrite
        # it in different ways according to the region
Ax = mpf_add(alpha, a, wp)
# case a <= 1
if not am[0]:
# c = b*b/(r + (a+1)); d = (s + (1-a))
# alpha - a = (1/2)*(c + d)
# case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a)
# case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d)))
c = mpf_div(b2, mpf_add(r, ap, wp), wp)
d = mpf_add(s, am, wp)
re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1)
if n == 0:
re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp)
else:
re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp)
else:
# c = Ax/(r + (a+1)); d = Ax/(s - (1-a))
# alpha - a = (1/2)*(c + d)
# case n = 0: re = atan(b*sqrt(c + d)/2/a)
            # case n = 1: re = atan(a/(b*sqrt(c + d)/2))
c = mpf_div(Ax, mpf_add(r, ap, wp), wp)
d = mpf_div(Ax, mpf_sub(s, am, wp), wp)
re = mpf_shift(mpf_add(c, d, wp), -1)
re = mpf_mul(b, mpf_sqrt(re, wp), wp)
if n == 0:
re = mpf_atan(mpf_div(re, a, wp), wp)
else:
re = mpf_atan(mpf_div(a, re, wp), wp)
# to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover
    # replace it with 1 + Am1 + sqrt(Am1*(alpha+1))
# where Am1 = alpha -1
# if alpha <= alpha_crossover:
if not mpf_sub(alpha_crossover, alpha, wp)[0]:
c1 = mpf_div(b2, mpf_add(r, ap, wp), wp)
# case a < 1
if mpf_neg(am)[0]:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a)))
c2 = mpf_add(s, am, wp)
c2 = mpf_div(b2, c2, wp)
Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
else:
# Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a)))
c2 = mpf_sub(s, am, wp)
Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
# im = log(1 + Am1 + sqrt(Am1*(alpha+1)))
im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp)
im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp)
else:
# im = log(alpha + sqrt(alpha*alpha - 1))
im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp)
im = mpf_log(mpf_add(alpha, im, wp), wp)
if asign:
if n == 0:
re = mpf_sub(mpf_pi(wp), re, wp)
else:
re = mpf_neg(re)
if not bsign and n == 0:
im = mpf_neg(im)
if bsign and n == 1:
im = mpf_neg(im)
re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
return re, im
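# Illustrative numerical check (a sketch, assuming the libmpf helpers
# from_float/to_float are in scope): the two branches can be exercised via
# the identity acos(z) + asin(z) = pi/2, e.g. for z = 0.3 + 0.4j:
#     z = (from_float(0.3), from_float(0.4))
#     c_re, c_im = mpc_acos(z, 53)
#     s_re, s_im = mpc_asin(z, 53)
#     # then c_re + s_re ~= pi/2 and c_im ~= -s_im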
def mpc_acos(z, prec, rnd=round_fast):
return acos_asin(z, prec, rnd, 0)
def mpc_asin(z, prec, rnd=round_fast):
return acos_asin(z, prec, rnd, 1)
def mpc_asinh(z, prec, rnd=round_fast):
# asinh(z) = I * asin(-I z)
a, b = z
a, b = mpc_asin((b, mpf_neg(a)), prec, rnd)
return mpf_neg(b), a
def mpc_acosh(z, prec, rnd=round_fast):
# acosh(z) = -I * acos(z) for Im(acos(z)) <= 0
# +I * acos(z) otherwise
a, b = mpc_acos(z, prec, rnd)
if b[0] or b == fzero:
return mpf_neg(b), a
else:
return b, mpf_neg(a)
def mpc_atanh(z, prec, rnd=round_fast):
# atanh(z) = (log(1+z)-log(1-z))/2
wp = prec + 15
a = mpc_add(z, mpc_one, wp)
b = mpc_sub(mpc_one, z, wp)
a = mpc_log(a, wp)
b = mpc_log(b, wp)
v = mpc_shift(mpc_sub(a, b, wp), -1)
# Subtraction at infinity gives correct imaginary part but
# wrong real part (should be zero)
if v[0] == fnan and mpc_is_inf(z):
v = (fzero, v[1])
return v
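# On the real slice this reduces to the familiar atanh(x) = log((1+x)/(1-x))/2;
# for example atanh(1/2) = log(3)/2 ~= 0.5493.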
def mpc_fibonacci(z, prec, rnd=round_fast):
re, im = z
if im == fzero:
return (mpf_fibonacci(re, prec, rnd), fzero)
size = max(abs(re[2]+re[3]), abs(im[2]+im[3]))
wp = prec + size + 20
a = mpf_phi(wp)
b = mpf_add(mpf_shift(a, 1), fnone, wp)
u = mpc_pow((a, fzero), z, wp)
v = mpc_cos_pi(z, wp)
v = mpc_div(v, u, wp)
u = mpc_sub(u, v, wp)
u = mpc_div_mpf(u, b, prec, rnd)
return u
def mpf_expj(x, prec, rnd='f'):
raise ComplexResult
def mpc_expj(z, prec, rnd='f'):
re, im = z
if im == fzero:
return mpf_cos_sin(re, prec, rnd)
if re == fzero:
return mpf_exp(mpf_neg(im), prec, rnd), fzero
ey = mpf_exp(mpf_neg(im), prec+10)
c, s = mpf_cos_sin(re, prec+10)
re = mpf_mul(ey, c, prec, rnd)
im = mpf_mul(ey, s, prec, rnd)
return re, im
def mpf_expjpi(x, prec, rnd='f'):
raise ComplexResult
def mpc_expjpi(z, prec, rnd='f'):
re, im = z
if im == fzero:
return mpf_cos_sin_pi(re, prec, rnd)
sign, man, exp, bc = im
wp = prec+10
if man:
wp += max(0, exp+bc)
im = mpf_neg(mpf_mul(mpf_pi(wp), im, wp))
if re == fzero:
return mpf_exp(im, prec, rnd), fzero
ey = mpf_exp(im, prec+10)
c, s = mpf_cos_sin_pi(re, prec+10)
re = mpf_mul(ey, c, prec, rnd)
im = mpf_mul(ey, s, prec, rnd)
return re, im
if BACKEND == 'sage':
try:
import sage.libs.mpmath.ext_libmp as _lbmp
mpc_exp = _lbmp.mpc_exp
mpc_sqrt = _lbmp.mpc_sqrt
except (ImportError, AttributeError):
print("Warning: Sage imports in libmpc failed")
#-----------------------------------------------------------------------#
# End of AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/libmpc.py
# The next bundled mpmath source file begins below.
#-----------------------------------------------------------------------#
from .backend import xrange
from .libmpf import (
ComplexResult,
round_down, round_up, round_floor, round_ceiling, round_nearest,
prec_to_dps, repr_dps, dps_to_prec,
bitcount,
from_float,
fnan, finf, fninf, fzero, fhalf, fone, fnone,
mpf_sign, mpf_lt, mpf_le, mpf_gt, mpf_ge, mpf_eq, mpf_cmp,
mpf_min_max,
mpf_floor, from_int, to_int, to_str, from_str,
mpf_abs, mpf_neg, mpf_pos, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_shift, mpf_pow_int,
from_man_exp, MPZ_ONE)
from .libelefun import (
mpf_log, mpf_exp, mpf_sqrt, mpf_atan, mpf_atan2,
mpf_pi, mod_pi2, mpf_cos_sin
)
from .gammazeta import mpf_gamma, mpf_rgamma, mpf_loggamma, mpc_loggamma
def mpi_str(s, prec):
sa, sb = s
dps = prec_to_dps(prec) + 5
return "[%s, %s]" % (to_str(sa, dps), to_str(sb, dps))
#dps = prec_to_dps(prec)
#m = mpi_mid(s, prec)
#d = mpf_shift(mpi_delta(s, 20), -1)
#return "%s +/- %s" % (to_str(m, dps), to_str(d, 3))
mpi_zero = (fzero, fzero)
mpi_one = (fone, fone)
def mpi_eq(s, t):
return s == t
def mpi_ne(s, t):
return s != t
def mpi_lt(s, t):
sa, sb = s
ta, tb = t
if mpf_lt(sb, ta): return True
if mpf_ge(sa, tb): return False
return None
def mpi_le(s, t):
sa, sb = s
ta, tb = t
if mpf_le(sb, ta): return True
if mpf_gt(sa, tb): return False
return None
def mpi_gt(s, t): return mpi_lt(t, s)
def mpi_ge(s, t): return mpi_le(t, s)
def mpi_add(s, t, prec=0):
sa, sb = s
ta, tb = t
a = mpf_add(sa, ta, prec, round_floor)
b = mpf_add(sb, tb, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = finf
return a, b
def mpi_sub(s, t, prec=0):
sa, sb = s
ta, tb = t
a = mpf_sub(sa, tb, prec, round_floor)
b = mpf_sub(sb, ta, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = finf
return a, b
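# Minimal usage sketch (illustrative values; from_float and to_str are
# imported from libmpf above): subtracting t = [0.5, 1] from s = [1, 2]
# yields an enclosure of [0, 1.5], with the lower endpoint rounded toward
# -inf and the upper endpoint toward +inf:
#     s = (from_float(1.0), from_float(2.0))
#     t = (from_float(0.5), from_float(1.0))
#     lo, hi = mpi_sub(s, t, 53)    # encloses [0.0, 1.5]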
def mpi_delta(s, prec):
sa, sb = s
return mpf_sub(sb, sa, prec, round_up)
def mpi_mid(s, prec):
sa, sb = s
return mpf_shift(mpf_add(sa, sb, prec, round_nearest), -1)
def mpi_pos(s, prec):
sa, sb = s
a = mpf_pos(sa, prec, round_floor)
b = mpf_pos(sb, prec, round_ceiling)
return a, b
def mpi_neg(s, prec=0):
sa, sb = s
a = mpf_neg(sb, prec, round_floor)
b = mpf_neg(sa, prec, round_ceiling)
return a, b
def mpi_abs(s, prec=0):
sa, sb = s
sas = mpf_sign(sa)
sbs = mpf_sign(sb)
# Both points nonnegative?
if sas >= 0:
a = mpf_pos(sa, prec, round_floor)
b = mpf_pos(sb, prec, round_ceiling)
# Upper point nonnegative?
elif sbs >= 0:
a = fzero
negsa = mpf_neg(sa)
if mpf_lt(negsa, sb):
b = mpf_pos(sb, prec, round_ceiling)
else:
b = mpf_pos(negsa, prec, round_ceiling)
# Both negative?
else:
a = mpf_neg(sb, prec, round_floor)
b = mpf_neg(sa, prec, round_ceiling)
return a, b
# TODO: optimize
def mpi_mul_mpf(s, t, prec):
return mpi_mul(s, (t, t), prec)
def mpi_div_mpf(s, t, prec):
return mpi_div(s, (t, t), prec)
def mpi_mul(s, t, prec=0):
sa, sb = s
ta, tb = t
sas = mpf_sign(sa)
sbs = mpf_sign(sb)
tas = mpf_sign(ta)
tbs = mpf_sign(tb)
if sas == sbs == 0:
# Should maybe be undefined
if ta == fninf or tb == finf:
return fninf, finf
return fzero, fzero
if tas == tbs == 0:
# Should maybe be undefined
if sa == fninf or sb == finf:
return fninf, finf
return fzero, fzero
if sas >= 0:
# positive * positive
if tas >= 0:
a = mpf_mul(sa, ta, prec, round_floor)
b = mpf_mul(sb, tb, prec, round_ceiling)
if a == fnan: a = fzero
if b == fnan: b = finf
# positive * negative
elif tbs <= 0:
a = mpf_mul(sb, ta, prec, round_floor)
b = mpf_mul(sa, tb, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = fzero
# positive * both signs
else:
a = mpf_mul(sb, ta, prec, round_floor)
b = mpf_mul(sb, tb, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = finf
elif sbs <= 0:
# negative * positive
if tas >= 0:
a = mpf_mul(sa, tb, prec, round_floor)
b = mpf_mul(sb, ta, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = fzero
# negative * negative
elif tbs <= 0:
a = mpf_mul(sb, tb, prec, round_floor)
b = mpf_mul(sa, ta, prec, round_ceiling)
if a == fnan: a = fzero
if b == fnan: b = finf
# negative * both signs
else:
a = mpf_mul(sa, tb, prec, round_floor)
b = mpf_mul(sa, ta, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = finf
else:
# General case: perform all cross-multiplications and compare
# Since the multiplications can be done exactly, we need only
# do 4 (instead of 8: two for each rounding mode)
cases = [mpf_mul(sa, ta), mpf_mul(sa, tb), mpf_mul(sb, ta), mpf_mul(sb, tb)]
if fnan in cases:
a, b = (fninf, finf)
else:
a, b = mpf_min_max(cases)
a = mpf_pos(a, prec, round_floor)
b = mpf_pos(b, prec, round_ceiling)
return a, b
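# Worked example for the mixed-sign branch: with s = [-1, 2] and t = [-3, 4]
# the four exact cross products are {3, -4, -6, 8}, so mpf_min_max selects
# the enclosure [-6, 8] before the final outward rounding.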
def mpi_square(s, prec=0):
sa, sb = s
if mpf_ge(sa, fzero):
a = mpf_mul(sa, sa, prec, round_floor)
b = mpf_mul(sb, sb, prec, round_ceiling)
elif mpf_le(sb, fzero):
a = mpf_mul(sb, sb, prec, round_floor)
b = mpf_mul(sa, sa, prec, round_ceiling)
else:
sa = mpf_neg(sa)
sa, sb = mpf_min_max([sa, sb])
a = fzero
b = mpf_mul(sb, sb, prec, round_ceiling)
return a, b
def mpi_div(s, t, prec):
sa, sb = s
ta, tb = t
sas = mpf_sign(sa)
sbs = mpf_sign(sb)
tas = mpf_sign(ta)
tbs = mpf_sign(tb)
# 0 / X
if sas == sbs == 0:
# 0 / <interval containing 0>
if (tas < 0 and tbs > 0) or (tas == 0 or tbs == 0):
return fninf, finf
return fzero, fzero
# Denominator contains both negative and positive numbers;
# this should properly be a multi-interval, but the closest
# match is the entire (extended) real line
if tas < 0 and tbs > 0:
return fninf, finf
# Assume denominator to be nonnegative
if tas < 0:
return mpi_div(mpi_neg(s), mpi_neg(t), prec)
# Division by zero
# XXX: make sure all results make sense
if tas == 0:
# Numerator contains both signs?
if sas < 0 and sbs > 0:
return fninf, finf
if tas == tbs:
return fninf, finf
# Numerator positive?
if sas >= 0:
a = mpf_div(sa, tb, prec, round_floor)
b = finf
if sbs <= 0:
a = fninf
b = mpf_div(sb, tb, prec, round_ceiling)
# Division with positive denominator
# We still have to handle nans resulting from inf/0 or inf/inf
else:
# Nonnegative numerator
if sas >= 0:
a = mpf_div(sa, tb, prec, round_floor)
b = mpf_div(sb, ta, prec, round_ceiling)
if a == fnan: a = fzero
if b == fnan: b = finf
# Nonpositive numerator
elif sbs <= 0:
a = mpf_div(sa, ta, prec, round_floor)
b = mpf_div(sb, tb, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = fzero
# Numerator contains both signs?
else:
a = mpf_div(sa, ta, prec, round_floor)
b = mpf_div(sb, ta, prec, round_ceiling)
if a == fnan: a = fninf
if b == fnan: b = finf
return a, b
def mpi_pi(prec):
a = mpf_pi(prec, round_floor)
b = mpf_pi(prec, round_ceiling)
return a, b
def mpi_exp(s, prec):
sa, sb = s
# exp is monotonic
a = mpf_exp(sa, prec, round_floor)
b = mpf_exp(sb, prec, round_ceiling)
return a, b
def mpi_log(s, prec):
sa, sb = s
# log is monotonic
a = mpf_log(sa, prec, round_floor)
b = mpf_log(sb, prec, round_ceiling)
return a, b
def mpi_sqrt(s, prec):
sa, sb = s
# sqrt is monotonic
a = mpf_sqrt(sa, prec, round_floor)
b = mpf_sqrt(sb, prec, round_ceiling)
return a, b
def mpi_atan(s, prec):
sa, sb = s
a = mpf_atan(sa, prec, round_floor)
b = mpf_atan(sb, prec, round_ceiling)
return a, b
def mpi_pow_int(s, n, prec):
sa, sb = s
if n < 0:
return mpi_div((fone, fone), mpi_pow_int(s, -n, prec+20), prec)
if n == 0:
return (fone, fone)
if n == 1:
return s
if n == 2:
return mpi_square(s, prec)
# Odd -- signs are preserved
if n & 1:
a = mpf_pow_int(sa, n, prec, round_floor)
b = mpf_pow_int(sb, n, prec, round_ceiling)
# Even -- important to ensure positivity
else:
sas = mpf_sign(sa)
sbs = mpf_sign(sb)
# Nonnegative?
if sas >= 0:
a = mpf_pow_int(sa, n, prec, round_floor)
b = mpf_pow_int(sb, n, prec, round_ceiling)
# Nonpositive?
elif sbs <= 0:
a = mpf_pow_int(sb, n, prec, round_floor)
b = mpf_pow_int(sa, n, prec, round_ceiling)
# Mixed signs?
else:
a = fzero
# max(-a,b)**n
sa = mpf_neg(sa)
if mpf_ge(sa, sb):
b = mpf_pow_int(sa, n, prec, round_ceiling)
else:
b = mpf_pow_int(sb, n, prec, round_ceiling)
return a, b
def mpi_pow(s, t, prec):
ta, tb = t
if ta == tb and ta not in (finf, fninf):
if ta == from_int(to_int(ta)):
return mpi_pow_int(s, to_int(ta), prec)
if ta == fhalf:
return mpi_sqrt(s, prec)
u = mpi_log(s, prec + 20)
v = mpi_mul(u, t, prec + 20)
return mpi_exp(v, prec)
def MIN(x, y):
if mpf_le(x, y):
return x
return y
def MAX(x, y):
if mpf_ge(x, y):
return x
return y
def cos_sin_quadrant(x, wp):
sign, man, exp, bc = x
if x == fzero:
return fone, fzero, 0
# TODO: combine evaluation code to avoid duplicate modulo
c, s = mpf_cos_sin(x, wp)
t, n, wp_ = mod_pi2(man, exp, exp+bc, 15)
if sign:
n = -1-n
return c, s, n
def mpi_cos_sin(x, prec):
a, b = x
if a == b == fzero:
return (fone, fone), (fzero, fzero)
# Guaranteed to contain both -1 and 1
if (finf in x) or (fninf in x):
return (fnone, fone), (fnone, fone)
wp = prec + 20
ca, sa, na = cos_sin_quadrant(a, wp)
cb, sb, nb = cos_sin_quadrant(b, wp)
ca, cb = mpf_min_max([ca, cb])
sa, sb = mpf_min_max([sa, sb])
# Both functions are monotonic within one quadrant
if na == nb:
pass
# Guaranteed to contain both -1 and 1
elif nb - na >= 4:
return (fnone, fone), (fnone, fone)
else:
# cos has maximum between a and b
if na//4 != nb//4:
cb = fone
# cos has minimum
if (na-2)//4 != (nb-2)//4:
ca = fnone
# sin has maximum
if (na-1)//4 != (nb-1)//4:
sb = fone
# sin has minimum
if (na-3)//4 != (nb-3)//4:
sa = fnone
# Perturb to force interval rounding
more = from_man_exp((MPZ_ONE<<wp) + (MPZ_ONE<<10), -wp)
less = from_man_exp((MPZ_ONE<<wp) - (MPZ_ONE<<10), -wp)
def finalize(v, rounding):
if bool(v[0]) == (rounding == round_floor):
p = more
else:
p = less
v = mpf_mul(v, p, prec, rounding)
sign, man, exp, bc = v
if exp+bc >= 1:
if sign:
return fnone
return fone
return v
ca = finalize(ca, round_floor)
cb = finalize(cb, round_ceiling)
sa = finalize(sa, round_floor)
sb = finalize(sb, round_ceiling)
return (ca,cb), (sa,sb)
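# Example of the quadrant bookkeeping above: for x = [1, 2] the interval
# straddles pi/2, where sin attains its maximum, so sb is forced to fone,
# while cos is monotonically decreasing there and keeps its endpoint values,
# giving roughly cos(x) in [cos(2), cos(1)] and sin(x) in [sin(1), 1].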
def mpi_cos(x, prec):
return mpi_cos_sin(x, prec)[0]
def mpi_sin(x, prec):
return mpi_cos_sin(x, prec)[1]
def mpi_tan(x, prec):
cos, sin = mpi_cos_sin(x, prec+20)
return mpi_div(sin, cos, prec)
def mpi_cot(x, prec):
cos, sin = mpi_cos_sin(x, prec+20)
return mpi_div(cos, sin, prec)
def mpi_from_str_a_b(x, y, percent, prec):
wp = prec + 20
xa = from_str(x, wp, round_floor)
xb = from_str(x, wp, round_ceiling)
#ya = from_str(y, wp, round_floor)
y = from_str(y, wp, round_ceiling)
assert mpf_ge(y, fzero)
if percent:
y = mpf_mul(MAX(mpf_abs(xa), mpf_abs(xb)), y, wp, round_ceiling)
y = mpf_div(y, from_int(100), wp, round_ceiling)
a = mpf_sub(xa, y, prec, round_floor)
b = mpf_add(xb, y, prec, round_ceiling)
return a, b
def mpi_from_str(s, prec):
"""
Parse an interval number given as a string.
Allowed forms are
"-1.23e-27"
Any single decimal floating-point literal.
"a +- b" or "a (b)"
a is the midpoint of the interval and b is the half-width
"a +- b%" or "a (b%)"
a is the midpoint of the interval and the half-width
is b percent of a (`a \times b / 100`).
"[a, b]"
The interval indicated directly.
"x[y,z]e"
x are shared digits, y and z are unequal digits, e is the exponent.
"""
e = ValueError("Improperly formed interval number '%s'" % s)
s = s.replace(" ", "")
wp = prec + 20
if "+-" in s:
x, y = s.split("+-")
return mpi_from_str_a_b(x, y, False, prec)
# case 2
elif "(" in s:
# Don't confuse with a complex number (x,y)
if s[0] == "(" or ")" not in s:
raise e
s = s.replace(")", "")
percent = False
if "%" in s:
if s[-1] != "%":
raise e
percent = True
s = s.replace("%", "")
x, y = s.split("(")
return mpi_from_str_a_b(x, y, percent, prec)
elif "," in s:
if ('[' not in s) or (']' not in s):
raise e
if s[0] == '[':
# case 3
s = s.replace("[", "")
s = s.replace("]", "")
a, b = s.split(",")
a = from_str(a, prec, round_floor)
b = from_str(b, prec, round_ceiling)
return a, b
else:
# case 4
x, y = s.split('[')
y, z = y.split(',')
if 'e' in s:
z, e = z.split(']')
else:
z, e = z.rstrip(']'), ''
a = from_str(x+y+e, prec, round_floor)
b = from_str(x+z+e, prec, round_ceiling)
return a, b
else:
a = from_str(s, prec, round_floor)
b = from_str(s, prec, round_ceiling)
return a, b
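# Usage sketch for the forms documented above (illustrative values):
#     mpi_from_str("1.0 +- 0.25", 53)     # encloses [0.75, 1.25]
#     mpi_from_str("1.0 (25%)", 53)       # same interval, case 2
#     mpi_from_str("[0.9, 1.1]", 53)      # case 3
#     mpi_from_str("1.23[4,6]e-8", 53)    # case 4: encloses [1.234e-8, 1.236e-8]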
def mpi_to_str(x, dps, use_spaces=True, brackets='[]', mode='brackets', error_dps=4, **kwargs):
"""
Convert a mpi interval to a string.
**Arguments**
*dps*
decimal places to use for printing
*use_spaces*
use spaces for more readable output, defaults to true
*brackets*
pair of strings (or two-character string) giving left and right brackets
*mode*
mode of display: 'plusminus', 'percent', 'brackets' (default) or 'diff'
*error_dps*
limit the error to *error_dps* digits (modes 'plusminus' and 'percent')
Additional keyword arguments are forwarded to the mpf-to-string conversion
for the components of the output.
**Examples**
>>> from mpmath import mpi, mp
>>> mp.dps = 30
>>> x = mpi(1, 2)
>>> mpi_to_str(x, mode='plusminus')
'1.5 +- 5.0e-1'
>>> mpi_to_str(x, mode='percent')
'1.5 (33.33%)'
>>> mpi_to_str(x, mode='brackets')
'[1.0, 2.0]'
>>> mpi_to_str(x, mode='brackets' , brackets=('<', '>'))
'<1.0, 2.0>'
>>> x = mpi('5.2582327113062393041', '5.2582327113062749951')
>>> mpi_to_str(x, mode='diff')
'5.2582327113062[4, 7]'
>>> mpi_to_str(mpi(0), mode='percent')
'0.0 (0%)'
"""
prec = dps_to_prec(dps)
wp = prec + 20
a, b = x
mid = mpi_mid(x, prec)
delta = mpi_delta(x, prec)
a_str = to_str(a, dps, **kwargs)
b_str = to_str(b, dps, **kwargs)
mid_str = to_str(mid, dps, **kwargs)
sp = ""
if use_spaces:
sp = " "
br1, br2 = brackets
if mode == 'plusminus':
delta_str = to_str(mpf_shift(delta,-1), dps, **kwargs)
s = mid_str + sp + "+-" + sp + delta_str
elif mode == 'percent':
if mid == fzero:
p = fzero
else:
# p = 100 * delta(x) / (2*mid(x))
p = mpf_mul(delta, from_int(100))
p = mpf_div(p, mpf_mul(mid, from_int(2)), wp)
s = mid_str + sp + "(" + to_str(p, error_dps) + "%)"
elif mode == 'brackets':
s = br1 + a_str + "," + sp + b_str + br2
elif mode == 'diff':
# use more digits if str(x.a) and str(x.b) are equal
if a_str == b_str:
a_str = to_str(a, dps+3, **kwargs)
b_str = to_str(b, dps+3, **kwargs)
# separate mantissa and exponent
a = a_str.split('e')
if len(a) == 1:
a.append('')
b = b_str.split('e')
if len(b) == 1:
b.append('')
if a[1] == b[1]:
if a[0] != b[0]:
for i in xrange(len(a[0]) + 1):
if a[0][i] != b[0][i]:
break
s = (a[0][:i] + br1 + a[0][i:] + ',' + sp + b[0][i:] + br2
+ 'e'*min(len(a[1]), 1) + a[1])
else: # no difference
s = a[0] + br1 + br2 + 'e'*min(len(a[1]), 1) + a[1]
else:
s = br1 + 'e'.join(a) + ',' + sp + 'e'.join(b) + br2
else:
raise ValueError("'%s' is unknown mode for printing mpi" % mode)
return s
def mpci_add(x, y, prec):
a, b = x
c, d = y
return mpi_add(a, c, prec), mpi_add(b, d, prec)
def mpci_sub(x, y, prec):
a, b = x
c, d = y
return mpi_sub(a, c, prec), mpi_sub(b, d, prec)
def mpci_neg(x, prec=0):
a, b = x
return mpi_neg(a, prec), mpi_neg(b, prec)
def mpci_pos(x, prec):
a, b = x
return mpi_pos(a, prec), mpi_pos(b, prec)
def mpci_mul(x, y, prec):
# TODO: optimize for real/imag cases
a, b = x
c, d = y
r1 = mpi_mul(a,c)
r2 = mpi_mul(b,d)
re = mpi_sub(r1,r2,prec)
i1 = mpi_mul(a,d)
i2 = mpi_mul(b,c)
im = mpi_add(i1,i2,prec)
return re, im
def mpci_div(x, y, prec):
# TODO: optimize for real/imag cases
a, b = x
c, d = y
wp = prec+20
m1 = mpi_square(c)
m2 = mpi_square(d)
m = mpi_add(m1,m2,wp)
re = mpi_add(mpi_mul(a,c), mpi_mul(b,d), wp)
im = mpi_sub(mpi_mul(b,c), mpi_mul(a,d), wp)
re = mpi_div(re, m, prec)
im = mpi_div(im, m, prec)
return re, im
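# This is the standard quotient formula
#     (a+b*I)/(c+d*I) = ((a*c + b*d) + (b*c - a*d)*I) / (c**2 + d**2)
# with every operation carried out on intervals, so each component of the
# result encloses the corresponding component of the true quotient.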
def mpci_exp(x, prec):
a, b = x
wp = prec+20
r = mpi_exp(a, wp)
c, s = mpi_cos_sin(b, wp)
a = mpi_mul(r, c, prec)
b = mpi_mul(r, s, prec)
return a, b
def mpi_shift(x, n):
a, b = x
return mpf_shift(a,n), mpf_shift(b,n)
def mpi_cosh_sinh(x, prec):
# TODO: accuracy for small x
wp = prec+20
e1 = mpi_exp(x, wp)
e2 = mpi_div(mpi_one, e1, wp)
c = mpi_add(e1, e2, prec)
s = mpi_sub(e1, e2, prec)
c = mpi_shift(c, -1)
s = mpi_shift(s, -1)
return c, s
def mpci_cos(x, prec):
a, b = x
wp = prec+10
c, s = mpi_cos_sin(a, wp)
ch, sh = mpi_cosh_sinh(b, wp)
re = mpi_mul(c, ch, prec)
im = mpi_mul(s, sh, prec)
return re, mpi_neg(im)
def mpci_sin(x, prec):
a, b = x
wp = prec+10
c, s = mpi_cos_sin(a, wp)
ch, sh = mpi_cosh_sinh(b, wp)
re = mpi_mul(s, ch, prec)
im = mpi_mul(c, sh, prec)
return re, im
def mpci_abs(x, prec):
a, b = x
if a == mpi_zero:
return mpi_abs(b)
if b == mpi_zero:
return mpi_abs(a)
# Important: nonnegative
a = mpi_square(a)
b = mpi_square(b)
t = mpi_add(a, b, prec+20)
return mpi_sqrt(t, prec)
def mpi_atan2(y, x, prec):
ya, yb = y
xa, xb = x
# Constrained to the real line
if ya == yb == fzero:
if mpf_ge(xa, fzero):
return mpi_zero
return mpi_pi(prec)
# Right half-plane
if mpf_ge(xa, fzero):
if mpf_ge(ya, fzero):
a = mpf_atan2(ya, xb, prec, round_floor)
else:
a = mpf_atan2(ya, xa, prec, round_floor)
if mpf_ge(yb, fzero):
b = mpf_atan2(yb, xa, prec, round_ceiling)
else:
b = mpf_atan2(yb, xb, prec, round_ceiling)
# Upper half-plane
elif mpf_ge(ya, fzero):
b = mpf_atan2(ya, xa, prec, round_ceiling)
if mpf_le(xb, fzero):
a = mpf_atan2(yb, xb, prec, round_floor)
else:
a = mpf_atan2(ya, xb, prec, round_floor)
# Lower half-plane
elif mpf_le(yb, fzero):
a = mpf_atan2(yb, xa, prec, round_floor)
if mpf_le(xb, fzero):
b = mpf_atan2(ya, xb, prec, round_ceiling)
else:
b = mpf_atan2(yb, xb, prec, round_ceiling)
# Covering the origin
else:
b = mpf_pi(prec, round_ceiling)
a = mpf_neg(b)
return a, b
def mpci_arg(z, prec):
x, y = z
return mpi_atan2(y, x, prec)
def mpci_log(z, prec):
x, y = z
re = mpi_log(mpci_abs(z, prec+20), prec)
im = mpci_arg(z, prec)
return re, im
def mpci_pow(x, y, prec):
# TODO: recognize/speed up real cases, integer y
yre, yim = y
if yim == mpi_zero:
ya, yb = yre
if ya == yb:
sign, man, exp, bc = yb
if man and exp >= 0:
return mpci_pow_int(x, (-1)**sign * int(man<<exp), prec)
# x^0
if yb == fzero:
return mpci_pow_int(x, 0, prec)
wp = prec+20
return mpci_exp(mpci_mul(y, mpci_log(x, wp), wp), prec)
def mpci_square(x, prec):
a, b = x
# (a+bi)^2 = (a^2-b^2) + 2abi
re = mpi_sub(mpi_square(a), mpi_square(b), prec)
im = mpi_mul(a, b, prec)
im = mpi_shift(im, 1)
return re, im
def mpci_pow_int(x, n, prec):
if n < 0:
return mpci_div((mpi_one,mpi_zero), mpci_pow_int(x, -n, prec+20), prec)
if n == 0:
return mpi_one, mpi_zero
if n == 1:
return mpci_pos(x, prec)
if n == 2:
return mpci_square(x, prec)
wp = prec + 20
result = (mpi_one, mpi_zero)
while n:
if n & 1:
result = mpci_mul(result, x, wp)
n -= 1
x = mpci_square(x, wp)
n >>= 1
return mpci_pos(result, prec)
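# The loop above is binary exponentiation (square-and-multiply): one interval
# multiplication per set bit of n plus one squaring per halving of n, so the
# cost grows as O(log n) complex-interval operations; e.g. n = 13 = 0b1101
# costs 3 multiplications.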
gamma_min_a = from_float(1.46163214496)
gamma_min_b = from_float(1.46163214497)
gamma_min = (gamma_min_a, gamma_min_b)
gamma_mono_imag_a = from_float(-1.1)
gamma_mono_imag_b = from_float(1.1)
def mpi_overlap(x, y):
a, b = x
c, d = y
if mpf_lt(d, a): return False
if mpf_gt(c, b): return False
return True
# type = 0 -- gamma
# type = 1 -- factorial
# type = 2 -- 1/gamma
# type = 3 -- log-gamma
def mpi_gamma(z, prec, type=0):
a, b = z
wp = prec+20
if type == 1:
return mpi_gamma(mpi_add(z, mpi_one, wp), prec, 0)
# increasing
if mpf_gt(a, gamma_min_b):
if type == 0:
c = mpf_gamma(a, prec, round_floor)
d = mpf_gamma(b, prec, round_ceiling)
elif type == 2:
c = mpf_rgamma(b, prec, round_floor)
d = mpf_rgamma(a, prec, round_ceiling)
elif type == 3:
c = mpf_loggamma(a, prec, round_floor)
d = mpf_loggamma(b, prec, round_ceiling)
# decreasing
elif mpf_gt(a, fzero) and mpf_lt(b, gamma_min_a):
if type == 0:
c = mpf_gamma(b, prec, round_floor)
d = mpf_gamma(a, prec, round_ceiling)
elif type == 2:
c = mpf_rgamma(a, prec, round_floor)
d = mpf_rgamma(b, prec, round_ceiling)
elif type == 3:
c = mpf_loggamma(b, prec, round_floor)
d = mpf_loggamma(a, prec, round_ceiling)
else:
# TODO: reflection formula
znew = mpi_add(z, mpi_one, wp)
if type == 0: return mpi_div(mpi_gamma(znew, prec+2, 0), z, prec)
if type == 2: return mpi_mul(mpi_gamma(znew, prec+2, 2), z, prec)
if type == 3: return mpi_sub(mpi_gamma(znew, prec+2, 3), mpi_log(z, prec+2), prec)
return c, d
def mpci_gamma(z, prec, type=0):
(a1,a2), (b1,b2) = z
# Real case
if b1 == b2 == fzero and (type != 3 or mpf_gt(a1,fzero)):
return mpi_gamma(z, prec, type), mpi_zero
# Estimate precision
wp = prec+20
if type != 3:
amag = a2[2]+a2[3]
bmag = b2[2]+b2[3]
if a2 != fzero:
mag = max(amag, bmag)
else:
mag = bmag
an = abs(to_int(a2))
bn = abs(to_int(b2))
absn = max(an, bn)
gamma_size = max(0,absn*mag)
wp += bitcount(gamma_size)
# Reduce the factorial (type 1) to the gamma case so that type != 1 below
if type == 1:
(a1,a2) = mpi_add((a1,a2), mpi_one, wp); z = (a1,a2), (b1,b2)
type = 0
# Avoid non-monotonic region near the negative real axis
if mpf_lt(a1, gamma_min_b):
if mpi_overlap((b1,b2), (gamma_mono_imag_a, gamma_mono_imag_b)):
# TODO: reflection formula
#if mpf_lt(a2, mpf_shift(fone,-1)):
# znew = mpci_sub((mpi_one,mpi_zero),z,wp)
# ...
# Recurrence:
# gamma(z) = gamma(z+1)/z
znew = mpi_add((a1,a2), mpi_one, wp), (b1,b2)
if type == 0: return mpci_div(mpci_gamma(znew, prec+2, 0), z, prec)
if type == 2: return mpci_mul(mpci_gamma(znew, prec+2, 2), z, prec)
if type == 3: return mpci_sub(mpci_gamma(znew, prec+2, 3), mpci_log(z,prec+2), prec)
# Use monotonicity (except for a small region close to the
# origin and near poles)
# upper half-plane
if mpf_ge(b1, fzero):
minre = mpc_loggamma((a1,b2), wp, round_floor)
maxre = mpc_loggamma((a2,b1), wp, round_ceiling)
minim = mpc_loggamma((a1,b1), wp, round_floor)
maxim = mpc_loggamma((a2,b2), wp, round_ceiling)
# lower half-plane
elif mpf_le(b2, fzero):
minre = mpc_loggamma((a1,b1), wp, round_floor)
maxre = mpc_loggamma((a2,b2), wp, round_ceiling)
minim = mpc_loggamma((a2,b1), wp, round_floor)
maxim = mpc_loggamma((a1,b2), wp, round_ceiling)
# crosses real axis
else:
maxre = mpc_loggamma((a2,fzero), wp, round_ceiling)
# stretches more into the lower half-plane
if mpf_gt(mpf_neg(b1), b2):
minre = mpc_loggamma((a1,b1), wp, round_ceiling)
else:
minre = mpc_loggamma((a1,b2), wp, round_ceiling)
minim = mpc_loggamma((a2,b1), wp, round_floor)
maxim = mpc_loggamma((a2,b2), wp, round_floor)
w = (minre[0], maxre[0]), (minim[1], maxim[1])
if type == 3:
return mpi_pos(w[0], prec), mpi_pos(w[1], prec)
if type == 2:
w = mpci_neg(w)
return mpci_exp(w, prec)
def mpi_loggamma(z, prec): return mpi_gamma(z, prec, type=3)
def mpci_loggamma(z, prec): return mpci_gamma(z, prec, type=3)
def mpi_rgamma(z, prec): return mpi_gamma(z, prec, type=2)
def mpci_rgamma(z, prec): return mpci_gamma(z, prec, type=2)
def mpi_factorial(z, prec): return mpi_gamma(z, prec, type=1)
def mpci_factorial(z, prec): return mpci_gamma(z, prec, type=1)
#-----------------------------------------------------------------------#
# End of AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/libmpi.py
# The next bundled mpmath source file begins below.
#-----------------------------------------------------------------------#
import math
from .backend import xrange
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy
from .libintmath import list_primes, ifac, ifac2, moebius
from .libmpf import (\
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast,
lshift, sqrt_fixed, isqrt_fast,
fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan,
from_int, to_int, to_fixed, from_man_exp, from_rational,
mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub,
mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int,
mpf_rdiv_int,
mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift,
negative_rnd, reciprocal_rnd,
bitcount, to_float, mpf_floor, mpf_sign, ComplexResult
)
from .libelefun import (\
constant_memo,
def_mpf_constant,
mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2,
mpf_exp, mpf_log, mpf_pow, mpf_cosh,
mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi,
ln_sqrt2pi_fixed, mpf_ln_sqrt2pi, sqrtpi_fixed, mpf_sqrtpi,
cos_sin_fixed, exp_fixed
)
from .libmpc import (\
mpc_zero, mpc_one, mpc_half, mpc_two,
mpc_abs, mpc_shift, mpc_pos, mpc_neg,
mpc_add, mpc_sub, mpc_mul, mpc_div,
mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div,
mpc_mul_int, mpc_pow_int,
mpc_log, mpc_exp, mpc_pow,
mpc_cos_pi, mpc_sin_pi,
mpc_reciprocal, mpc_square,
mpc_sub_mpf
)
# Catalan's constant is computed using Lupas's rapidly convergent series
# (listed on http://mathworld.wolfram.com/CatalansConstant.html)
#
# K = (1/64) * sum_{n>=1} (-1)**(n-1) * 2**(8*n) * (40*n**2 - 24*n + 3)
#              * ((2*n)!)**3 * (n!)**2 / (n**3 * (2*n-1) * ((4*n)!)**2)
#
@constant_memo
def catalan_fixed(prec):
prec = prec + 20
a = one = MPZ_ONE << prec
s, t, n = 0, 1, 1
while t:
a *= 32 * n**3 * (2*n-1)
a //= (3-16*n+16*n**2)**2
t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1))
s += t
n += 1
return s >> (20 + 6)
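# Note on the fixed-point convention shared by these constant routines: the
# return value is the integer closest to C * 2**prec. A quick illustrative
# float check:
#     catalan_fixed(53) / 2.0**53    # ~= 0.915965594177219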
# Khinchin's constant is relatively difficult to compute. Here
# we use the rational zeta series
#
# log(K)*log(2) = sum_{n>=1} (zeta(2*n)-1)/n * sum_{k=1}^{2*n-1} (-1)**(k+1)/k
#
# which adds half a digit per term. The essential trick for achieving
# reasonable efficiency is to recycle both the values of the zeta
# function (essentially Bernoulli numbers) and the partial terms of
# the inner sum.
# An alternative might be to use K = 2*exp[1/log(2) X] where
#
# X = integral_{0}^{1} log(pi*x*(1-x**2)/sin(pi*x)) / (x*(1+x)) dx
#
# and integrate numerically. In practice, this seems to be slightly
# slower than the zeta series at high precision.
@constant_memo
def khinchin_fixed(prec):
wp = int(prec + prec**0.5 + 15)
s = MPZ_ZERO
fac = from_int(4)
t = ONE = MPZ_ONE << wp
pi = mpf_pi(wp)
pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2)
n = 1
while 1:
zeta2n = mpf_abs(mpf_bernoulli(2*n, wp))
zeta2n = mpf_mul(zeta2n, pipow, wp)
zeta2n = mpf_div(zeta2n, fac, wp)
zeta2n = to_fixed(zeta2n, wp)
term = (((zeta2n - ONE) * t) // n) >> wp
if term < 100:
break
#if not n % 10:
# print n, math.log(int(abs(term)))
s += term
t += ONE//(2*n+1) - ONE//(2*n)
n += 1
fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp)
pipow = mpf_mul(pipow, twopi2, wp)
s = (s << wp) // ln2_fixed(wp)
K = mpf_exp(from_man_exp(s, -wp), wp)
K = to_fixed(K, prec)
return K
# Glaisher's constant is defined as A = exp(1/2 - zeta'(-1)).
# One way to compute it would be to perform direct numerical
# differentiation, but computing arbitrary Riemann zeta function
# values at high precision is expensive. We instead use the formula
# A = exp((6 (-zeta'(2))/pi^2 + log 2 pi + gamma)/12)
# and compute zeta'(2) from the series representation
#
# -zeta'(2) = sum_{k>=2} log(k) / k**2
#
# This series converges exceptionally slowly, but can be accelerated
# using Euler-Maclaurin formula. The important insight is that the
# E-M integral can be done in closed form and that the high-order
# derivatives are given by
#
# d^n/dx^n (log(x)/x**2) = (a + b*log(x)) / x**(n+2)
#
# where a and b are integers given by a simple recurrence. Note
# that just one logarithm is needed. However, lots of integer
# logarithms are required for the initial summation.
# This algorithm could possibly be turned into a faster algorithm
# for general evaluation of zeta(s) or zeta'(s); this should be
# looked into.
@constant_memo
def glaisher_fixed(prec):
wp = prec + 30
# Number of direct terms to sum before applying the Euler-Maclaurin
# formula to the tail. TODO: choose more intelligently
N = int(0.33*prec + 5)
ONE = MPZ_ONE << wp
# Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1
s = MPZ_ZERO
for k in range(2, N):
#print k, N
s += log_int_fixed(k, wp) // k**2
logN = log_int_fixed(N, wp)
#logN = to_fixed(mpf_log(from_int(N), wp+20), wp)
# E-M step 2: integral of log(x)/x**2 from N to inf
s += (ONE + logN) // N
# E-M step 3: endpoint correction term f(N)/2
s += logN // (N**2 * 2)
# E-M step 4: the series of derivatives
pN = N**3
a = 1
b = -2
j = 3
fac = from_int(2)
k = 1
while 1:
# D(2*k-1) * B(2*k) / fac(2*k) [D(n) = nth derivative]
D = ((a << wp) + b*logN) // pN
D = from_man_exp(D, -wp)
B = mpf_bernoulli(2*k, wp)
term = mpf_mul(B, D, wp)
term = mpf_div(term, fac, wp)
term = to_fixed(term, wp)
if abs(term) < 100:
break
#if not k % 10:
# print k, math.log(int(abs(term)), 10)
s -= term
# Advance derivative twice
a, b, pN, j = b-a*j, -j*b, pN*N, j+1
a, b, pN, j = b-a*j, -j*b, pN*N, j+1
k += 1
fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp)
# A = exp((6*s/pi**2 + log(2*pi) + euler)/12)
pi = pi_fixed(wp)
s *= 6
s = (s << wp) // (pi**2 >> wp)
s += euler_fixed(wp)
s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp)
s //= 12
A = mpf_exp(from_man_exp(s, -wp), wp)
return to_fixed(A, prec)
# Apery's constant can be computed using the very rapidly convergent
# series
#
# zeta(3) = sum_{n>=0} (-1)**n * ((205*n**2 + 250*n + 77)/64)
#           * (n!)**10 / ((2*n+1)!)**5
#
@constant_memo
def apery_fixed(prec):
prec += 20
d = MPZ_ONE << prec
term = MPZ(77) << prec
n = 1
s = MPZ_ZERO
while term:
s += term
d *= (n**10)
d //= (((2*n+1)**5) * (2*n)**5)
term = (-1)**n * (205*(n**2) + 250*n + 77) * d
n += 1
return s >> (20 + 6)
"""
Euler's constant (gamma) is computed using the Brent-McMillan formula,
gamma ~= I(n)/J(n) - log(n), where
I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k)
J(n) = sum_{k=0,1,2,...} (n**k / k!)**2
H(k) = 1 + 1/2 + 1/3 + ... + 1/k
The error is bounded by O(exp(-4n)). Choosing n to be a power
of two, 2**p, the logarithm becomes particularly easy to calculate.[1]
We use the formulation of Algorithm 3.9 in [2] to make the summation
more efficient.
Reference:
[1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma
http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf
[2] Jonathan Borwein & David Bailey, Mathematics by Experiment,
A K Peters, 2003
"""
@constant_memo
def euler_fixed(prec):
extra = 30
prec += extra
# choose p such that exp(-4*(2**p)) < 2**-n
p = int(math.log((prec/4) * math.log(2), 2)) + 1
n = 2**p
A = U = -p*ln2_fixed(prec)
B = V = MPZ_ONE << prec
k = 1
while 1:
B = B*n**2//k**2
A = (A*n**2//k + B)//k
U += A
V += B
if max(abs(A), abs(B)) < 100:
break
k += 1
return (U<<(prec-extra))//V
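# Sizing example for the parameter choice above: at prec ~= 1000 bits the
# error bound exp(-4*n) < 2**-prec requires n > prec*log(2)/4 ~= 174, so
# p = 8 is selected and the summation runs with n = 2**8 = 256.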
# Use zeta accelerated formulas for the Mertens and twin
# prime constants; see
# http://mathworld.wolfram.com/MertensConstant.html
# http://mathworld.wolfram.com/TwinPrimesConstant.html
@constant_memo
def mertens_fixed(prec):
wp = prec + 20
m = 2
s = mpf_euler(wp)
while 1:
t = mpf_zeta_int(m, wp)
if t == fone:
break
t = mpf_log(t, wp)
t = mpf_mul_int(t, moebius(m), wp)
t = mpf_div(t, from_int(m), wp)
s = mpf_add(s, t)
m += 1
return to_fixed(s, prec)
@constant_memo
def twinprime_fixed(prec):
def I(n):
return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n
wp = 2*prec + 30
res = fone
primes = [from_rational(1,p,wp) for p in [2,3,5,7]]
ppowers = [mpf_mul(p,p,wp) for p in primes]
n = 2
while 1:
a = mpf_zeta_int(n, wp)
for i in range(4):
a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp)
ppowers[i] = mpf_mul(ppowers[i], primes[i], wp)
a = mpf_pow_int(a, -I(n), wp)
if mpf_pos(a, prec+10, 'n') == fone:
break
#from libmpf import to_str
#print n, to_str(mpf_sub(fone, a), 6)
res = mpf_mul(res, a, wp)
n += 1
res = mpf_mul(res, from_int(3*15*35), wp)
res = mpf_div(res, from_int(4*16*36), wp)
return to_fixed(res, prec)
mpf_euler = def_mpf_constant(euler_fixed)
mpf_apery = def_mpf_constant(apery_fixed)
mpf_khinchin = def_mpf_constant(khinchin_fixed)
mpf_glaisher = def_mpf_constant(glaisher_fixed)
mpf_catalan = def_mpf_constant(catalan_fixed)
mpf_mertens = def_mpf_constant(mertens_fixed)
mpf_twinprime = def_mpf_constant(twinprime_fixed)
#-----------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#-----------------------------------------------------------------------#
MAX_BERNOULLI_CACHE = 3000
"""
Small Bernoulli numbers and factorials are used in numerous summations,
so it is critical for speed that sequential computation is fast and that
values are cached up to a fairly high threshold.
On the other hand, we also want to support fast computation of isolated
large numbers. Currently, no such acceleration is provided for integer
factorials (though it is for large floating-point factorials, which are
computed via gamma if the precision is low enough).
For sequential computation of Bernoulli numbers, we use Ramanujan's formula
B_n = (A(n) - S(n)) / binomial(n+3, n)
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and
S(n) = sum_{k=1}^{floor(n/6)} binomial(n+3, n-6*k) * B_{n-6*k}
For isolated large Bernoulli numbers, we use the Riemann zeta function
to calculate a numerical value for B_n. The von Staudt-Clausen theorem
can then be used to optionally find the exact value of the
numerator and denominator.
"""
bernoulli_cache = {}
f3 = from_int(3)
f6 = from_int(6)
def bernoulli_size(n):
"""Accurately estimate the size of B_n (even n > 2 only)"""
lgn = math.log(n,2)
return int(2.326 + 0.5*lgn + n*(lgn - 4.094))
BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE)
def mpf_bernoulli(n, prec, rnd=None):
"""Computation of Bernoulli numbers (numerically)"""
if n < 2:
if n < 0:
raise ValueError("Bernoulli numbers only defined for n >= 0")
if n == 0:
return fone
if n == 1:
return mpf_neg(fhalf)
# For odd n > 1, the Bernoulli numbers are zero
if n & 1:
return fzero
# If precision is extremely high, we can save time by computing
# the Bernoulli number at a lower precision that is sufficient to
# obtain the exact fraction, round to the exact fraction, and
# convert the fraction back to an mpf value at the original precision
if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000:
p, q = bernfrac(n)
return from_rational(p, q, prec, rnd or round_floor)
if n > MAX_BERNOULLI_CACHE:
return mpf_bernoulli_huge(n, prec, rnd)
wp = prec + 30
# Reuse nearby precisions
wp += 32 - (prec & 31)
cached = bernoulli_cache.get(wp)
if cached:
numbers, state = cached
if n in numbers:
if not rnd:
return numbers[n]
return mpf_pos(numbers[n], prec, rnd)
m, bin, bin1 = state
if n - m > 10:
return mpf_bernoulli_huge(n, prec, rnd)
else:
if n > 10:
return mpf_bernoulli_huge(n, prec, rnd)
numbers = {0:fone}
m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE]
bernoulli_cache[wp] = (numbers, state)
while m <= n:
#print m
case = m % 6
# Accurately estimate size of B_m so we can use
# fixed point math without using too much precision
szbm = bernoulli_size(m)
s = 0
sexp = max(0, szbm) - wp
if m < 6:
a = MPZ_ZERO
else:
a = bin1
for j in xrange(1, m//6+1):
usign, uman, uexp, ubc = u = numbers[m-6*j]
if usign:
uman = -uman
s += lshift(a*uman, uexp-sexp)
# Update inner binomial coefficient
j6 = 6*j
a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6))
a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6))
if case == 0: b = mpf_rdiv_int(m+3, f3, wp)
if case == 2: b = mpf_rdiv_int(m+3, f3, wp)
if case == 4: b = mpf_rdiv_int(-m-3, f6, wp)
s = from_man_exp(s, sexp, wp)
b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp)
numbers[m] = b
m += 2
# Update outer binomial coefficient
bin = bin * ((m+2)*(m+3)) // (m*(m-1))
if m > 6:
bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6))
state[:] = [m, bin, bin1]
return numbers[n]
def mpf_bernoulli_huge(n, prec, rnd=None):
wp = prec + 10
piprec = wp + int(math.log(n,2))
v = mpf_gamma_int(n+1, wp)
v = mpf_mul(v, mpf_zeta_int(n, wp), wp)
v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp))
v = mpf_shift(v, 1-n)
if not n & 3:
v = mpf_neg(v)
return mpf_pos(v, prec, rnd or round_fast)
def bernfrac(n):
r"""
Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly,
where `B_n` denotes the `n`-th Bernoulli number. The fraction is
always reduced to lowest terms. Note that for `n > 1` and `n` odd,
`B_n = 0`, and `(0, 1)` is returned.
**Examples**
The first few Bernoulli numbers are exactly::
>>> from mpmath import *
>>> for n in range(15):
... p, q = bernfrac(n)
... print("%s %s/%s" % (n, p, q))
...
0 1/1
1 -1/2
2 1/6
3 0/1
4 -1/30
5 0/1
6 1/42
7 0/1
8 -1/30
9 0/1
10 5/66
11 0/1
12 -691/2730
13 0/1
14 7/6
This function works for arbitrarily large `n`::
>>> p, q = bernfrac(10**4)
>>> print(q)
2338224387510
>>> print(len(str(p)))
27692
>>> mp.dps = 15
>>> print(mpf(p) / q)
-9.04942396360948e+27677
>>> print(bernoulli(10**4))
-9.04942396360948e+27677
.. note ::
:func:`~mpmath.bernoulli` computes a floating-point approximation
directly, without computing the exact fraction first.
This is much faster for large `n`.
**Algorithm**
:func:`~mpmath.bernfrac` works by computing the value of `B_n` numerically
and then using the von Staudt-Clausen theorem [1] to reconstruct
the exact fraction. For large `n`, this is significantly faster than
computing `B_1, B_2, \ldots, B_n` recursively with exact arithmetic.
The implementation has been tested for `n = 10^m` up to `m = 6`.
In practice, :func:`~mpmath.bernfrac` appears to be about three times
slower than the specialized program calcbn.exe [2]
**References**
1. MathWorld, von Staudt-Clausen Theorem:
http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html
2. The Bernoulli Number Page:
http://www.bernoulli.org/
"""
n = int(n)
if n < 3:
return [(1, 1), (-1, 2), (1, 6)][n]
if n & 1:
return (0, 1)
q = 1
for k in list_primes(n+1):
if not (n % (k-1)):
q *= k
prec = bernoulli_size(n) + int(math.log(q,2)) + 20
b = mpf_bernoulli(n, prec)
p = mpf_mul(b, from_int(q))
pint = to_int(p, round_nearest)
return (pint, q)
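# Worked example of the reconstruction above: for n = 12 the primes k with
# (k-1) | 12 are 2, 3, 5, 7 and 13, so q = 2*3*5*7*13 = 2730; rounding
# B_12*q to the nearest integer gives p = -691, matching B_12 = -691/2730.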
#-----------------------------------------------------------------------#
# #
# The gamma function (OLD IMPLEMENTATION) #
# #
#-----------------------------------------------------------------------#
"""
We compute the real factorial / gamma function using Spouge's approximation
x! = (x+a)**(x+1/2) * exp(-x-a) * [c_0 + S(x) + eps]
where S(x) is the sum of c_k/(x+k) from k = 1 to a-1 and the coefficients
are given by
c_0 = sqrt(2*pi)
c_k = ((-1)**(k-1) / (k-1)!) * (a-k)**(k-1/2) * exp(a-k),  k = 1, 2, ..., a-1
As proved by Spouge, if we choose a = log(2)/log(2*pi)*n = 0.38*n, the
relative error eps is less than 2^(-n) for any x in the right complex
half-plane (assuming a > 2). In practice, it seems that a can be chosen
quite a bit lower still (30-50%); this possibility should be investigated.
For negative x, we use the reflection formula.
References:
-----------
John L. Spouge, "Computation of the gamma, digamma, and trigamma
functions", SIAM Journal on Numerical Analysis 31 (1994), no. 3, 931-944.
"""
spouge_cache = {}
def calc_spouge_coefficients(a, prec):
wp = prec + int(a*1.4)
c = [0] * a
# b = exp(a-1)
b = mpf_exp(from_int(a-1), wp)
# e = exp(1)
e = mpf_exp(fone, wp)
# sqrt(2*pi)
sq2pi = mpf_sqrt(mpf_shift(mpf_pi(wp), 1), wp)
c[0] = to_fixed(sq2pi, prec)
for k in xrange(1, a):
# c[k] = ((-1)**(k-1) * (a-k)**k) * b / sqrt(a-k)
term = mpf_mul_int(b, ((-1)**(k-1) * (a-k)**k), wp)
term = mpf_div(term, mpf_sqrt(from_int(a-k), wp), wp)
c[k] = to_fixed(term, prec)
# b = b / (e * k)
b = mpf_div(b, mpf_mul(e, from_int(k), wp), wp)
return c
# Cached lookup of coefficients
def get_spouge_coefficients(prec):
# This exact precision has been used before
if prec in spouge_cache:
return spouge_cache[prec]
for p in spouge_cache:
if 0.8 <= prec/float(p) < 1:
return spouge_cache[p]
# Here we estimate the value of a based on Spouge's inequality for
# the relative error
a = max(3, int(0.38*prec)) # 0.38 = log(2)/log(2*pi), ~= 1.26*n
coefs = calc_spouge_coefficients(a, prec)
spouge_cache[prec] = (prec, a, coefs)
return spouge_cache[prec]
def spouge_sum_real(x, prec, a, c):
x = to_fixed(x, prec)
s = c[0]
for k in xrange(1, a):
s += (c[k] << prec) // (x + (k << prec))
return from_man_exp(s, -prec, prec, round_floor)
# Unused: for fast computation of gamma(p/q)
def spouge_sum_rational(p, q, prec, a, c):
s = c[0]
for k in xrange(1, a):
s += c[k] * q // (p+q*k)
return from_man_exp(s, -prec, prec, round_floor)
# For a complex number a + b*I, we have
#
# c_k / ((a + b*I) + k) = (a+k)*c_k / M - (b*c_k / M) * I
#
# where M = (a+k)**2 + b**2 = (a**2 + b**2) + (2*a*k + k**2)
def spouge_sum_complex(re, im, prec, a, c):
re = to_fixed(re, prec)
im = to_fixed(im, prec)
sre, sim = c[0], 0
mag = ((re**2)>>prec) + ((im**2)>>prec)
for k in xrange(1, a):
M = mag + re*(2*k) + ((k**2) << prec)
sre += (c[k] * (re + (k << prec))) // M
sim -= (c[k] * im) // M
re = from_man_exp(sre, -prec, prec, round_floor)
im = from_man_exp(sim, -prec, prec, round_floor)
return re, im
def mpf_gamma_int_old(n, prec, rounding=round_fast):
if n < 1000:
return from_int(ifac(n-1), prec, rounding)
# XXX: choose the cutoff less arbitrarily
size = int(n*math.log(n,2))
if prec > size/20.0:
return from_int(ifac(n-1), prec, rounding)
return mpf_gamma(from_int(n), prec, rounding)
def mpf_factorial_old(x, prec, rounding=round_fast):
return mpf_gamma_old(x, prec, rounding, p1=0)
def mpc_factorial_old(x, prec, rounding=round_fast):
return mpc_gamma_old(x, prec, rounding, p1=0)
def mpf_gamma_old(x, prec, rounding=round_fast, p1=1):
"""
Computes the gamma function of a real floating-point argument.
With p1=0, computes a factorial instead.
"""
sign, man, exp, bc = x
if not man:
if x == finf:
return finf
if x == fninf or x == fnan:
return fnan
# More precision is needed for enormous x. TODO:
# use Stirling's formula + Euler-Maclaurin summation
size = exp + bc
if size > 5:
size = int(size * math.log(size,2))
wp = prec + max(0, size) + 15
if exp >= 0:
if sign or (p1 and not man):
raise ValueError("gamma function pole")
# A direct factorial is fastest
if exp + bc <= 10:
return from_int(ifac((man<<exp)-p1), prec, rounding)
reflect = sign or exp+bc < -1
if p1:
# Should be done exactly!
x = mpf_sub(x, fone)
# x < 0.25
if reflect:
# gamma = pi / (sin(pi*x) * gamma(1-x))
wp += 15
pix = mpf_mul(x, mpf_pi(wp), wp)
t = mpf_sin_pi(x, wp)
g = mpf_gamma_old(mpf_sub(fone, x), wp)
return mpf_div(pix, mpf_mul(t, g, wp), prec, rounding)
sprec, a, c = get_spouge_coefficients(wp)
s = spouge_sum_real(x, sprec, a, c)
# gamma = exp(log(x+a)*(x+0.5) - xpa) * s
xpa = mpf_add(x, from_int(a), wp)
logxpa = mpf_log(xpa, wp)
xph = mpf_add(x, fhalf, wp)
t = mpf_sub(mpf_mul(logxpa, xph, wp), xpa, wp)
t = mpf_mul(mpf_exp(t, wp), s, prec, rounding)
return t
def mpc_gamma_old(x, prec, rounding=round_fast, p1=1):
re, im = x
if im == fzero:
return mpf_gamma_old(re, prec, rounding, p1), fzero
# More precision is needed for enormous x.
sign, man, exp, bc = re
isign, iman, iexp, ibc = im
if re == fzero:
size = iexp+ibc
else:
size = max(exp+bc, iexp+ibc)
if size > 5:
size = int(size * math.log(size,2))
reflect = sign or (exp+bc < -1)
wp = prec + max(0, size) + 25
# Near x = 0 pole (TODO: other poles)
if p1:
if size < -prec-5:
return mpc_add_mpf(mpc_div(mpc_one, x, 2*prec+10), \
mpf_neg(mpf_euler(2*prec+10)), prec, rounding)
elif size < -5:
wp += (-2*size)
if p1:
# Should be done exactly!
re_orig = re
re = mpf_sub(re, fone, bc+abs(exp)+2)
x = re, im
if reflect:
# Reflection formula
wp += 15
pi = mpf_pi(wp), fzero
pix = mpc_mul(x, pi, wp)
t = mpc_sin_pi(x, wp)
u = mpc_sub(mpc_one, x, wp)
g = mpc_gamma_old(u, wp)
w = mpc_mul(t, g, wp)
return mpc_div(pix, w, wp)
# Extremely close to the real line?
# XXX: reflection formula
if iexp+ibc < -wp:
a = mpf_gamma_old(re_orig, wp)
b = mpf_psi0(re_orig, wp)
# First-order Taylor step: gamma(x+iy) ~ gamma(x) + iy*gamma'(x),
# with gamma'(x) = gamma(x)*psi0(x)
gamma_diff = mpf_mul(a, b, wp)
return mpf_pos(a, prec, rounding), mpf_mul(gamma_diff, im, prec, rounding)
sprec, a, c = get_spouge_coefficients(wp)
s = spouge_sum_complex(re, im, sprec, a, c)
# gamma = exp(log(x+a)*(x+0.5) - xpa) * s
repa = mpf_add(re, from_int(a), wp)
logxpa = mpc_log((repa, im), wp)
reph = mpf_add(re, fhalf, wp)
t = mpc_sub(mpc_mul(logxpa, (reph, im), wp), (repa, im), wp)
t = mpc_mul(mpc_exp(t, wp), s, prec, rounding)
return t
#-----------------------------------------------------------------------#
# #
# Polygamma functions #
# #
#-----------------------------------------------------------------------#
"""
For all polygamma (psi) functions, we use the Euler-Maclaurin summation
formula. It looks slightly different in the m = 0 and m > 0 cases.
For m = 0, we have
psi^(0)(z) ~ log z + 1/(2 z) - sum_{k>=1} B_{2k} / (2k * z**(2k))
Experiment shows that the minimum term of the asymptotic series
reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence
for psi (equivalent, in fact, to summing to the first few terms
directly before applying E-M) to obtain z large enough.
Since, very crudely, log z ~= 1 for Re(z) > 1, we can use
fixed-point arithmetic (if z is extremely large, log(z) itself
is a sufficient approximation, so we can stop there already).
For Re(z) << 0, we could use recurrence, but this is of course
inefficient for large negative z, so there we use the
reflection formula instead.
For m > 0, we have
psi~^(m)(z) ~ sum_{k=1}^{N-1} 1/(z+k)**(m+1) + 1/(2*(z+N)**(m+1))
              + 1/(m*(z+N)**m)
              + sum_{k>=1} (B_{2k}/(2k)!) * (m+1)(m+2)...(m+2k-1) / (z+N)**(m+2k)
where psi~ denotes the polygamma function rescaled by 1/((-1)**(m+1) * m!).
Here again N is chosen to make z+N large enough for the minimum
term in the last series to become smaller than eps.
TODO: the current estimation of N for m > 0 is *very suboptimal*.
TODO: implement the reflection formula for m > 0, Re(z) << 0.
It is generally a combination of multiple cotangents. Need to
figure out a reasonably simple way to generate these formulas
on the fly.
TODO: maybe use exact algorithms to compute psi for integral
and certain rational arguments, as this can be much more
efficient. (On the other hand, the availability of these
special values provides a convenient way to test the general
algorithm.)
"""
# Harmonic numbers are just shifted digamma functions
# We should calculate these exactly when x is an integer
# and when doing so is faster.
def mpf_harmonic(x, prec, rnd):
if x in (fzero, fnan, finf):
return x
a = mpf_psi0(mpf_add(fone, x, prec+5), prec)
return mpf_add(a, mpf_euler(prec+5, rnd), prec, rnd)
def mpc_harmonic(z, prec, rnd):
if z[1] == fzero:
return (mpf_harmonic(z[0], prec, rnd), fzero)
a = mpc_psi0(mpc_add_mpf(z, fone, prec+5), prec)
return mpc_add_mpf(a, mpf_euler(prec+5, rnd), prec, rnd)
def mpf_psi0(x, prec, rnd=round_fast):
"""
Computation of the digamma function (psi function of order 0)
of a real argument.
"""
sign, man, exp, bc = x
wp = prec + 10
if not man:
if x == finf: return x
if x == fninf or x == fnan: return fnan
if x == fzero or (exp >= 0 and sign):
raise ValueError("polygamma pole")
# Reflection formula
if sign and exp+bc > 3:
c, s = mpf_cos_sin_pi(x, wp)
q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp)
p = mpf_psi0(mpf_sub(fone, x, wp), wp)
return mpf_sub(p, q, prec, rnd)
# The logarithmic term is accurate enough
if (not sign) and bc + exp > wp:
return mpf_log(mpf_sub(x, fone, wp), prec, rnd)
# Initial recurrence to obtain a large enough x
m = to_int(x)
n = int(0.11*wp) + 2
s = MPZ_ZERO
x = to_fixed(x, wp)
one = MPZ_ONE << wp
if m < n:
for k in xrange(m, n):
s -= (one << wp) // x
x += one
x -= one
# Logarithmic term
s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp)
# Endpoint term in Euler-Maclaurin expansion
s += (one << wp) // (2*x)
# Euler-Maclaurin remainder sum
x2 = (x*x) >> wp
t = one
prev = 0
k = 1
while 1:
t = (t*x2) >> wp
bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp)
offset = (bexp + 2*wp)
if offset >= 0: term = (bman << offset) // (t*(2*k))
else: term = (bman >> (-offset)) // (t*(2*k))
if k & 1: s -= term
else: s += term
if k > 2 and term >= prev:
break
prev = term
k += 1
return from_man_exp(s, -wp, wp, rnd)
def mpc_psi0(z, prec, rnd=round_fast):
"""
Computation of the digamma function (psi function of order 0)
of a complex argument.
"""
re, im = z
# Fall back to the real case
if im == fzero:
return (mpf_psi0(re, prec, rnd), fzero)
wp = prec + 20
sign, man, exp, bc = re
# Reflection formula
if sign and exp+bc > 3:
c = mpc_cos_pi(z, wp)
s = mpc_sin_pi(z, wp)
q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp)
p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp)
return mpc_sub(p, q, prec, rnd)
# Just the logarithmic term
if (not sign) and bc + exp > wp:
return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd)
# Initial recurrence to obtain a large enough z
w = to_int(re)
n = int(0.11*wp) + 2
s = mpc_zero
if w < n:
for k in xrange(w, n):
s = mpc_sub(s, mpc_reciprocal(z, wp), wp)
z = mpc_add_mpf(z, fone, wp)
z = mpc_sub(z, mpc_one, wp)
# Logarithmic and endpoint term
s = mpc_add(s, mpc_log(z, wp), wp)
s = mpc_add(s, mpc_div(mpc_half, z, wp), wp)
# Euler-Maclaurin remainder sum
z2 = mpc_square(z, wp)
t = mpc_one
prev = mpc_zero
k = 1
eps = mpf_shift(fone, -wp+2)
while 1:
t = mpc_mul(t, z2, wp)
bern = mpf_bernoulli(2*k, wp)
term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp)
s = mpc_sub(s, term, wp)
szterm = mpc_abs(term, 10)
if k > 2 and mpf_le(szterm, eps):
break
prev = term
k += 1
return s
# Currently unoptimized
def mpf_psi(m, x, prec, rnd=round_fast):
"""
Computation of the polygamma function of arbitrary integer order
m >= 0, for a real argument x.
"""
if m == 0:
return mpf_psi0(x, prec, rnd)
return mpc_psi(m, (x, fzero), prec, rnd)[0]
def mpc_psi(m, z, prec, rnd=round_fast):
"""
Computation of the polygamma function of arbitrary integer order
m >= 0, for a complex argument z.
"""
if m == 0:
return mpc_psi0(z, prec, rnd)
re, im = z
wp = prec + 20
sign, man, exp, bc = re
if not im[1]:
if im in (finf, fninf, fnan):
return (fnan, fnan)
if not man:
if re == finf and im == fzero:
return (fzero, fzero)
if re == fnan:
return (fnan, fnan)
# Recurrence
w = to_int(re)
n = int(0.4*wp + 4*m)
s = mpc_zero
if w < n:
for k in xrange(w, n):
t = mpc_pow_int(z, -m-1, wp)
s = mpc_add(s, t, wp)
z = mpc_add_mpf(z, fone, wp)
zm = mpc_pow_int(z, -m, wp)
z2 = mpc_pow_int(z, -2, wp)
# 1/m*(z+N)^m
integral_term = mpc_div_mpf(zm, from_int(m), wp)
s = mpc_add(s, integral_term, wp)
# 1/2*(z+N)^(-(m+1))
s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp)
a = m + 1
b = 2
k = 1
# Important: we want to sum up to the *relative* error,
# not the absolute error, because psi^(m)(z) might be tiny
magn = mpc_abs(s, 10)
magn = magn[2]+magn[3]
eps = mpf_shift(fone, magn-wp+2)
while 1:
zm = mpc_mul(zm, z2, wp)
bern = mpf_bernoulli(2*k, wp)
scal = mpf_mul_int(bern, a, wp)
scal = mpf_div(scal, from_int(b), wp)
term = mpc_mul_mpf(zm, scal, wp)
s = mpc_add(s, term, wp)
szterm = mpc_abs(term, 10)
if k > 2 and mpf_le(szterm, eps):
break
#print k, to_str(szterm, 10), to_str(eps, 10)
a *= (m+2*k)*(m+2*k+1)
b *= (2*k+1)*(2*k+2)
k += 1
# Scale and sign factor
v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd)
if not (m & 1):
v = mpf_neg(v[0]), mpf_neg(v[1])
return v
#-----------------------------------------------------------------------#
# #
# Riemann zeta function #
# #
#-----------------------------------------------------------------------#
"""
We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation
eta(s) ~= (-1/d_n) * sum_{k=0}^{n-1} (-1)**k * (d_k - d_n) / (k+1)**s
where
d_k = n * sum_{i=0}^{k} (n+i-1)! * 4**i / ((n-i)! * (2*i)!)
If s = a + b*I, the absolute error for eta(s) is bounded by
3*(1 + 2*|b|) / (3+sqrt(8))**n * exp(|b|*pi/2)
Disregarding the linear term, we have approximately,
log(err) ~= log(exp(1.58*|b|)) - log(5.8**n)
log(err) ~= 1.58*|b| - log(5.8)*n
log(err) ~= 1.58*|b| - 1.76*n
log2(err) ~= 2.28*|b| - 2.54*n
So for p bits, we should choose n > (p + 2.28*|b|) / 2.54.
References:
-----------
Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function"
http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps
http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
borwein_cache = {}
def borwein_coefficients(n):
if n in borwein_cache:
return borwein_cache[n]
ds = [MPZ_ZERO] * (n+1)
d = MPZ_ONE
s = ds[0] = MPZ_ONE
for i in range(1, n+1):
d = d * 4 * (n+i-1) * (n-i+1)
d //= ((2*i) * ((2*i)-1))
s += d
ds[i] = s
borwein_cache[n] = ds
return ds
ZETA_INT_CACHE_MAX_PREC = 1000
zeta_int_cache = {}
def mpf_zeta_int(s, prec, rnd=round_fast):
"""
Optimized computation of zeta(s) for an integer s.
"""
wp = prec + 20
s = int(s)
if s in zeta_int_cache and zeta_int_cache[s][0] >= wp:
return mpf_pos(zeta_int_cache[s][1], prec, rnd)
if s < 2:
if s == 1:
raise ValueError("zeta(1) pole")
if not s:
return mpf_neg(fhalf)
return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd)
# 2^-s term vanishes?
if s >= wp:
return mpf_perturb(fone, 0, prec, rnd)
# 5^-s term vanishes?
elif s >= wp*0.431:
t = one = 1 << wp
t += 1 << (wp - s)
t += one // (MPZ_THREE ** s)
t += 1 << max(0, wp - s*2)
return from_man_exp(t, -wp, prec, rnd)
else:
# Fast enough to sum directly?
# Even better, we use the Euler product (idea stolen from pari)
m = (float(wp)/(s-1) + 1)
if m < 30:
needed_terms = int(2.0**m + 1)
if needed_terms < int(wp/2.54 + 5) / 10:
t = fone
for k in list_primes(needed_terms):
#print k, needed_terms
powprec = int(wp - s*math.log(k,2))
if powprec < 2:
break
a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp)
t = mpf_mul(t, a, wp)
return mpf_div(fone, t, wp)
# Use Borwein's algorithm
n = int(wp/2.54 + 5)
d = borwein_coefficients(n)
t = MPZ_ZERO
s = MPZ(s)
for k in xrange(n):
t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s
t = (t << wp) // (-d[n])
t = (t << wp) // ((1 << wp) - (1 << (wp+1-s)))
if (s in zeta_int_cache and zeta_int_cache[s][0] < wp) or (s not in zeta_int_cache):
zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp))
return from_man_exp(t, -wp-wp, prec, rnd)
def mpf_zeta(s, prec, rnd=round_fast, alt=0):
sign, man, exp, bc = s
if not man:
if s == fzero:
if alt:
return fhalf
else:
return mpf_neg(fhalf)
if s == finf:
return fone
return fnan
wp = prec + 20
# First term vanishes?
if (not sign) and (exp + bc > (math.log(wp,2) + 2)):
return mpf_perturb(fone, alt, prec, rnd)
# Optimize for integer arguments
elif exp >= 0:
if alt:
if s == fone:
return mpf_ln2(prec, rnd)
z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd])
q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
return mpf_mul(z, q, prec, rnd)
else:
return mpf_zeta_int(to_int(s), prec, rnd)
# Negative: use the reflection formula
# Borwein only proves the accuracy bound for x >= 1/2. However, based on
# tests, the accuracy without reflection is quite good even some distance
# to the left of 1/2. XXX: verify this.
if sign:
# XXX: could use the separate refl. formula for Dirichlet eta
if alt:
q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
return mpf_mul(mpf_zeta(s, wp), q, prec, rnd)
# XXX: -1 should be done exactly
y = mpf_sub(fone, s, 10*wp)
a = mpf_gamma(y, wp)
b = mpf_zeta(y, wp)
c = mpf_sin_pi(mpf_shift(s, -1), wp)
wp2 = wp + (exp+bc)
pi = mpf_pi(wp+wp2)
d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2)
return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd)
# Near pole
r = mpf_sub(fone, s, wp)
asign, aman, aexp, abc = mpf_abs(r)
pole_dist = -2*(aexp+abc)
if pole_dist > wp:
if alt:
return mpf_ln2(prec, rnd)
else:
q = mpf_neg(mpf_div(fone, r, wp))
return mpf_add(q, mpf_euler(wp), prec, rnd)
else:
wp += max(0, pole_dist)
t = MPZ_ZERO
#wp += 16 - (prec & 15)
# Use Borwein's algorithm
n = int(wp/2.54 + 5)
d = borwein_coefficients(n)
t = MPZ_ZERO
sf = to_fixed(s, wp)
ln2 = ln2_fixed(wp)
for k in xrange(n):
u = (-sf*log_int_fixed(k+1, wp, ln2)) >> wp
#esign, eman, eexp, ebc = mpf_exp(u, wp)
#offset = eexp + wp
#if offset >= 0:
# w = ((d[k] - d[n]) * eman) << offset
#else:
# w = ((d[k] - d[n]) * eman) >> (-offset)
eman = exp_fixed(u, wp, ln2)
w = (d[k] - d[n]) * eman
if k & 1:
t -= w
else:
t += w
t = t // (-d[n])
t = from_man_exp(t, -wp, wp)
if alt:
return mpf_pos(t, prec, rnd)
else:
q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
return mpf_div(t, q, prec, rnd)
def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False):
re, im = s
if im == fzero:
return mpf_zeta(re, prec, rnd, alt), fzero
# slow for large s
if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)):
raise NotImplementedError
wp = prec + 20
# Near pole
r = mpc_sub(mpc_one, s, wp)
asign, aman, aexp, abc = mpc_abs(r, 10)
pole_dist = -2*(aexp+abc)
if pole_dist > wp:
if alt:
q = mpf_ln2(wp)
y = mpf_mul(q, mpf_euler(wp), wp)
g = mpf_shift(mpf_mul(q, q, wp), -1)
g = mpf_sub(y, g)
z = mpc_mul_mpf(r, mpf_neg(g), wp)
z = mpc_add_mpf(z, q, wp)
return mpc_pos(z, prec, rnd)
else:
q = mpc_neg(mpc_div(mpc_one, r, wp))
q = mpc_add_mpf(q, mpf_euler(wp), wp)
return mpc_pos(q, prec, rnd)
else:
wp += max(0, pole_dist)
# Reflection formula. To be rigorous, we should reflect to the left of
# re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary
# slowdown for interesting values of s
if mpf_lt(re, fzero):
# XXX: could use the separate refl. formula for Dirichlet eta
if alt:
q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp),
wp), wp)
return mpc_mul(mpc_zeta(s, wp), q, prec, rnd)
# XXX: -1 should be done exactly
y = mpc_sub(mpc_one, s, 10*wp)
a = mpc_gamma(y, wp)
b = mpc_zeta(y, wp)
c = mpc_sin_pi(mpc_shift(s, -1), wp)
rsign, rman, rexp, rbc = re
isign, iman, iexp, ibc = im
mag = max(rexp+rbc, iexp+ibc)
wp2 = wp + mag
pi = mpf_pi(wp+wp2)
pi2 = (mpf_shift(pi, 1), fzero)
d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2)
return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd)
n = int(wp/2.54 + 5)
n += int(0.9*abs(to_int(im)))
d = borwein_coefficients(n)
ref = to_fixed(re, wp)
imf = to_fixed(im, wp)
tre = MPZ_ZERO
tim = MPZ_ZERO
one = MPZ_ONE << wp
one_2wp = MPZ_ONE << (2*wp)
critical_line = re == fhalf
ln2 = ln2_fixed(wp)
pi2 = pi_fixed(wp-1)
wp2 = wp+wp
for k in xrange(n):
log = log_int_fixed(k+1, wp, ln2)
# A square root is much cheaper than an exp
if critical_line:
w = one_2wp // isqrt_fast((k+1) << wp2)
else:
w = exp_fixed((-ref*log) >> wp, wp)
if k & 1:
w *= (d[n] - d[k])
else:
w *= (d[k] - d[n])
wre, wim = cos_sin_fixed((-imf*log)>>wp, wp, pi2)
tre += (w * wre) >> wp
tim += (w * wim) >> wp
tre //= (-d[n])
tim //= (-d[n])
tre = from_man_exp(tre, -wp, wp)
tim = from_man_exp(tim, -wp, wp)
if alt:
return mpc_pos((tre, tim), prec, rnd)
else:
q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, wp), wp)
return mpc_div((tre, tim), q, prec, rnd)
def mpf_altzeta(s, prec, rnd=round_fast):
return mpf_zeta(s, prec, rnd, 1)
def mpc_altzeta(s, prec, rnd=round_fast):
return mpc_zeta(s, prec, rnd, 1)
# Not optimized currently
mpf_zetasum = None
def pow_fixed(x, n, wp):
if n == 1:
return x
y = MPZ_ONE << wp
while n:
if n & 1:
y = (y*x) >> wp
n -= 1
x = (x*x) >> wp
n //= 2
return y
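# Editor's note: throughout this module a "fixed-point number with precision
# wp" is the integer round(v * 2**wp). With wp = 16, for example:
#     x = int(1.5 * 2**16)            # 98304 represents 1.5
#     pow_fixed(x, 3, 16) / 2.0**16   # ~= 3.375 == 1.5**3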
# TODO: optimize / cleanup interface / unify with list_primes
sieve_cache = []
primes_cache = []
mult_cache = []
def primesieve(n):
global sieve_cache, primes_cache, mult_cache
if n < len(sieve_cache):
sieve = sieve_cache#[:n+1]
primes = primes_cache[:primes_cache.index(max(sieve))+1]
mult = mult_cache#[:n+1]
return sieve, primes, mult
sieve = [0] * (n+1)
mult = [0] * (n+1)
primes = list_primes(n)
for p in primes:
#sieve[p::p] = p
for k in xrange(p,n+1,p):
sieve[k] = p
for i, p in enumerate(sieve):
if i >= 2:
m = 1
n = i // p
while not n % p:
n //= p
m += 1
mult[i] = m
sieve_cache = sieve
primes_cache = primes
mult_cache = mult
return sieve, primes, mult
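# Editor's note: sieve[k] is the largest prime factor of k and mult[k] its
# multiplicity, so k == sieve[k]**mult[k] * (k // sieve[k]**mult[k]);
# zetasum_sieved below uses this to assemble k**-s from cached prime powers.
# For example, primesieve(12)[0][12] == 3 and primesieve(12)[2][12] == 1,
# since 12 = 3**1 * 4.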
def zetasum_sieved(critical_line, sre, sim, a, n, wp):
assert a >= 1
sieve, primes, mult = primesieve(a+n)
basic_powers = {}
one = MPZ_ONE << wp
one_2wp = MPZ_ONE << (2*wp)
wp2 = wp+wp
ln2 = ln2_fixed(wp)
pi2 = pi_fixed(wp-1)
for p in primes:
if p*2 > a+n:
break
log = log_int_fixed(p, wp, ln2)
cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
if critical_line:
u = one_2wp // isqrt_fast(p<<wp2)
else:
u = exp_fixed((-sre*log)>>wp, wp)
pre = (u*cos) >> wp
pim = (u*sin) >> wp
basic_powers[p] = [(pre, pim)]
tre, tim = pre, pim
for m in range(1,int(math.log(a+n,p)+0.01)+1):
tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
basic_powers[p].append((tre,tim))
xre = MPZ_ZERO
xim = MPZ_ZERO
if a == 1:
xre += one
aa = max(a,2)
for k in xrange(aa, a+n+1):
p = sieve[k]
if p in basic_powers:
m = mult[k]
tre, tim = basic_powers[p][m-1]
while 1:
k //= p**m
if k == 1:
break
p = sieve[k]
m = mult[k]
pre, pim = basic_powers[p][m-1]
tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
else:
log = log_int_fixed(k, wp, ln2)
cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
if critical_line:
u = one_2wp // isqrt_fast(k<<wp2)
else:
u = exp_fixed((-sre*log)>>wp, wp)
tre = (u*cos) >> wp
tim = (u*sin) >> wp
xre += tre
xim += tim
return xre, xim
# Set to something large to disable
ZETASUM_SIEVE_CUTOFF = 10
def mpc_zetasum(s, a, n, derivatives, reflect, prec):
"""
Fast version of mp._zetasum, assuming s = complex, a = integer.
"""
wp = prec + 10
have_derivatives = derivatives != [0]
have_one_derivative = len(derivatives) == 1
# parse s
sre, sim = s
critical_line = (sre == fhalf)
sre = to_fixed(sre, wp)
sim = to_fixed(sim, wp)
if a > 0 and n > ZETASUM_SIEVE_CUTOFF and not have_derivatives and not reflect:
re, im = zetasum_sieved(critical_line, sre, sim, a, n, wp)
xs = [(from_man_exp(re, -wp, prec, 'n'), from_man_exp(im, -wp, prec, 'n'))]
return xs, []
maxd = max(derivatives)
if not have_one_derivative:
derivatives = range(maxd+1)
# x_d = 0, y_d = 0
xre = [MPZ_ZERO for d in derivatives]
xim = [MPZ_ZERO for d in derivatives]
if reflect:
yre = [MPZ_ZERO for d in derivatives]
yim = [MPZ_ZERO for d in derivatives]
else:
yre = yim = []
one = MPZ_ONE << wp
one_2wp = MPZ_ONE << (2*wp)
ln2 = ln2_fixed(wp)
pi2 = pi_fixed(wp-1)
wp2 = wp+wp
for w in xrange(a, a+n+1):
log = log_int_fixed(w, wp, ln2)
cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
if critical_line:
u = one_2wp // isqrt_fast(w<<wp2)
else:
u = exp_fixed((-sre*log)>>wp, wp)
xterm_re = (u * cos) >> wp
xterm_im = (u * sin) >> wp
if reflect:
reciprocal = (one_2wp // (u*w))
yterm_re = (reciprocal * cos) >> wp
yterm_im = (reciprocal * sin) >> wp
if have_derivatives:
if have_one_derivative:
log = pow_fixed(log, maxd, wp)
xre[0] += (xterm_re * log) >> wp
xim[0] += (xterm_im * log) >> wp
if reflect:
yre[0] += (yterm_re * log) >> wp
yim[0] += (yterm_im * log) >> wp
else:
t = MPZ_ONE << wp
for d in derivatives:
xre[d] += (xterm_re * t) >> wp
xim[d] += (xterm_im * t) >> wp
if reflect:
yre[d] += (yterm_re * t) >> wp
yim[d] += (yterm_im * t) >> wp
t = (t * log) >> wp
else:
xre[0] += xterm_re
xim[0] += xterm_im
if reflect:
yre[0] += yterm_re
yim[0] += yterm_im
if have_derivatives:
if have_one_derivative:
if maxd % 2:
xre[0] = -xre[0]
xim[0] = -xim[0]
if reflect:
yre[0] = -yre[0]
yim[0] = -yim[0]
else:
xre = [(-1)**d * xre[d] for d in derivatives]
xim = [(-1)**d * xim[d] for d in derivatives]
if reflect:
yre = [(-1)**d * yre[d] for d in derivatives]
yim = [(-1)**d * yim[d] for d in derivatives]
xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n'))
for (xa, xb) in zip(xre, xim)]
ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n'))
for (ya, yb) in zip(yre, yim)]
return xs, ys
#-----------------------------------------------------------------------#
# #
# The gamma function (NEW IMPLEMENTATION) #
# #
#-----------------------------------------------------------------------#
# Higher means faster, but more precomputation time
MAX_GAMMA_TAYLOR_PREC = 5000
# Need to derive higher bounds for Taylor series to go higher
assert MAX_GAMMA_TAYLOR_PREC < 15000
# Use Stirling's series if abs(x) > beta*prec
# Important: must be large enough for convergence!
GAMMA_STIRLING_BETA = 0.2
SMALL_FACTORIAL_CACHE_SIZE = 150
gamma_taylor_cache = {}
gamma_stirling_cache = {}
small_factorial_cache = [from_int(ifac(n)) for \
n in range(SMALL_FACTORIAL_CACHE_SIZE+1)]
def zeta_array(N, prec):
"""
zeta(n) = A * pi**n / n! + B
where A is a rational number (A = Bernoulli number
for n even) and B is an infinite sum over powers of exp(2*pi).
(B = 0 for n even).
TODO: this is currently only used for gamma, but could
be very useful elsewhere.
"""
extra = 30
wp = prec+extra
zeta_values = [MPZ_ZERO] * (N+2)
pi = pi_fixed(wp)
# STEP 1:
one = MPZ_ONE << wp
zeta_values[0] = -one//2
f_2pi = mpf_shift(mpf_pi(wp),1)
exp_2pi_k = exp_2pi = mpf_exp(f_2pi, wp)
# Compute exponential series
# Store values of 1/(exp(2*pi*k)-1),
# exp(2*pi*k)/(exp(2*pi*k)-1)**2, 1/(exp(2*pi*k)-1)**2
# pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
exps3 = []
k = 1
while 1:
tp = wp - 9*k
if tp < 1:
break
        # 1/(exp(2*pi*k)-1)
q1 = mpf_div(fone, mpf_sub(exp_2pi_k, fone, tp), tp)
# pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
q2 = mpf_mul(exp_2pi_k, mpf_mul(q1,q1,tp), tp)
q1 = to_fixed(q1, wp)
q2 = to_fixed(q2, wp)
q2 = (k * q2 * pi) >> wp
exps3.append((q1, q2))
# Multiply for next round
exp_2pi_k = mpf_mul(exp_2pi_k, exp_2pi, wp)
k += 1
# Exponential sum
for n in xrange(3, N+1, 2):
s = MPZ_ZERO
k = 1
for e1, e2 in exps3:
if n%4 == 3:
t = e1 // k**n
else:
U = (n-1)//4
t = (e1 + e2//U) // k**n
if not t:
break
s += t
k += 1
zeta_values[n] = -2*s
# Even zeta values
B = [mpf_abs(mpf_bernoulli(k,wp)) for k in xrange(N+2)]
pi_pow = fpi = mpf_pow_int(mpf_shift(mpf_pi(wp), 1), 2, wp)
pi_pow = mpf_div(pi_pow, from_int(4), wp)
for n in xrange(2,N+2,2):
z = mpf_mul(B[n], pi_pow, wp)
zeta_values[n] = to_fixed(z, wp)
pi_pow = mpf_mul(pi_pow, fpi, wp)
pi_pow = mpf_div(pi_pow, from_int((n+1)*(n+2)), wp)
# Zeta sum
reciprocal_pi = (one << wp) // pi
for n in xrange(3, N+1, 4):
U = (n-3)//4
s = zeta_values[4*U+4]*(4*U+7)//4
for k in xrange(1, U+1):
s -= (zeta_values[4*k] * zeta_values[4*U+4-4*k]) >> wp
zeta_values[n] += (2*s*reciprocal_pi) >> wp
for n in xrange(5, N+1, 4):
U = (n-1)//4
s = zeta_values[4*U+2]*(2*U+1)
for k in xrange(1, 2*U+1):
s += ((-1)**k*2*k* zeta_values[2*k] * zeta_values[4*U+2-2*k])>>wp
zeta_values[n] += ((s*reciprocal_pi)>>wp)//(2*U)
return [x>>extra for x in zeta_values]
def gamma_taylor_coefficients(inprec):
"""
Gives the Taylor coefficients of 1/gamma(1+x) as
a list of fixed-point numbers. Enough coefficients are returned
to ensure that the series converges to the given precision
when x is in [0.5, 1.5].
"""
# Reuse nearby cache values (small case)
if inprec < 400:
prec = inprec + (10-(inprec%10))
elif inprec < 1000:
prec = inprec + (30-(inprec%30))
else:
prec = inprec
if prec in gamma_taylor_cache:
return gamma_taylor_cache[prec], prec
# Experimentally determined bounds
if prec < 1000:
N = int(prec**0.76 + 2)
else:
# Valid to at least 15000 bits
N = int(prec**0.787 + 2)
# Reuse higher precision values
for cprec in gamma_taylor_cache:
if cprec > prec:
coeffs = [x>>(cprec-prec) for x in gamma_taylor_cache[cprec][-N:]]
if inprec < 1000:
gamma_taylor_cache[prec] = coeffs
return coeffs, prec
# Cache at a higher precision (large case)
if prec > 1000:
prec = int(prec * 1.2)
wp = prec + 20
A = [0] * N
A[0] = MPZ_ZERO
A[1] = MPZ_ONE << wp
A[2] = euler_fixed(wp)
# SLOW, reference implementation
#zeta_values = [0,0]+[to_fixed(mpf_zeta_int(k,wp),wp) for k in xrange(2,N)]
zeta_values = zeta_array(N, wp)
for k in xrange(3, N):
a = (-A[2]*A[k-1])>>wp
for j in xrange(2,k):
a += ((-1)**j * zeta_values[j] * A[k-j]) >> wp
a //= (1-k)
A[k] = a
A = [a>>20 for a in A]
A = A[::-1]
A = A[:-1]
gamma_taylor_cache[prec] = A
#return A, prec
return gamma_taylor_coefficients(inprec)
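# Editor's illustration (hypothetical helper, not part of mpmath): evaluating
# 1/gamma(1+x) by Horner's rule with the coefficients returned above (stored
# highest order first), checked against math.gamma for x in [-0.5, 0.5]:
def _demo_rgamma_taylor(x, prec=53):
    coeffs, wp = gamma_taylor_coefficients(prec)
    xf = int(x * 2**wp)             # x as a fixed-point number
    p = 0
    for c in coeffs:
        p = c + ((xf * p) >> wp)
    return p / 2.0**wp              # ~= 1/math.gamma(1+x)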
def gamma_fixed_taylor(xmpf, x, wp, prec, rnd, type):
    # Round x to the nearest integer
#n = int(x >> (wp-1))
#steps = (n-1)>>1
nearest_int = ((x >> (wp-1)) + MPZ_ONE) >> 1
one = MPZ_ONE << wp
coeffs, cwp = gamma_taylor_coefficients(wp)
if nearest_int > 0:
r = one
for i in xrange(nearest_int-1):
x -= one
r = (r*x) >> wp
x -= one
p = MPZ_ZERO
for c in coeffs:
p = c + ((x*p)>>wp)
p >>= (cwp-wp)
if type == 0:
return from_man_exp((r<<wp)//p, -wp, prec, rnd)
if type == 2:
return mpf_shift(from_rational(p, (r<<wp), prec, rnd), wp)
if type == 3:
return mpf_log(mpf_abs(from_man_exp((r<<wp)//p, -wp)), prec, rnd)
else:
r = one
for i in xrange(-nearest_int):
r = (r*x) >> wp
x += one
p = MPZ_ZERO
for c in coeffs:
p = c + ((x*p)>>wp)
p >>= (cwp-wp)
if wp - bitcount(abs(x)) > 10:
# pass very close to 0, so do floating-point multiply
g = mpf_add(xmpf, from_int(-nearest_int)) # exact
r = from_man_exp(p*r,-wp-wp)
r = mpf_mul(r, g, wp)
if type == 0:
return mpf_div(fone, r, prec, rnd)
if type == 2:
return mpf_pos(r, prec, rnd)
if type == 3:
return mpf_log(mpf_abs(mpf_div(fone, r, wp)), prec, rnd)
else:
r = from_man_exp(x*p*r,-3*wp)
if type == 0: return mpf_div(fone, r, prec, rnd)
if type == 2: return mpf_pos(r, prec, rnd)
if type == 3: return mpf_neg(mpf_log(mpf_abs(r), prec, rnd))
def stirling_coefficient(n):
if n in gamma_stirling_cache:
return gamma_stirling_cache[n]
p, q = bernfrac(n)
q *= MPZ(n*(n-1))
gamma_stirling_cache[n] = p, q, bitcount(abs(p)), bitcount(q)
return gamma_stirling_cache[n]
def real_stirling_series(x, prec):
"""
Sums the rational part of Stirling's expansion,
log(sqrt(2*pi)) - z + 1/(12*z) - 1/(360*z^3) + ...
"""
t = (MPZ_ONE<<(prec+prec)) // x # t = 1/x
u = (t*t)>>prec # u = 1/x**2
s = ln_sqrt2pi_fixed(prec) - x
# Add initial terms of Stirling's series
s += t//12; t = (t*u)>>prec
s -= t//360; t = (t*u)>>prec
s += t//1260; t = (t*u)>>prec
s -= t//1680; t = (t*u)>>prec
if not t: return s
s += t//1188; t = (t*u)>>prec
s -= 691*t//360360; t = (t*u)>>prec
s += t//156; t = (t*u)>>prec
if not t: return s
s -= 3617*t//122400; t = (t*u)>>prec
s += 43867*t//244188; t = (t*u)>>prec
s -= 174611*t//125400; t = (t*u)>>prec
if not t: return s
k = 22
# From here on, the coefficients are growing, so we
# have to keep t at a roughly constant size
usize = bitcount(abs(u))
tsize = bitcount(abs(t))
texp = 0
while 1:
p, q, pb, qb = stirling_coefficient(k)
term_mag = tsize + pb + texp
shift = -texp
m = pb - term_mag
if m > 0 and shift < m:
p >>= m
shift -= m
m = tsize - term_mag
if m > 0 and shift < m:
w = t >> m
shift -= m
else:
w = t
        term = (w*p//q) >> shift
if not term:
break
s += term
t = (t*u) >> usize
texp -= (prec - usize)
k += 2
return s
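# Editor's illustration (hypothetical helper, not part of mpmath): adding the
# (x-1/2)*log(x) term to the series above yields log(gamma(x)), as done in
# mpf_gamma below. A float check against math.lgamma for largish x:
def _demo_stirling_lgamma(x, wp=100):
    xf = int(x * 2**wp)             # x as a fixed-point number
    s = real_stirling_series(xf, wp)
    return s / 2.0**wp + (x - 0.5) * math.log(x)
# _demo_stirling_lgamma(40.0) ~= math.lgamma(40.0) ~= 106.63176026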
def complex_stirling_series(x, y, prec):
# t = 1/z
_m = (x*x + y*y) >> prec
tre = (x << prec) // _m
tim = (-y << prec) // _m
# u = 1/z**2
ure = (tre*tre - tim*tim) >> prec
uim = tim*tre >> (prec-1)
# s = log(sqrt(2*pi)) - z
sre = ln_sqrt2pi_fixed(prec) - x
sim = -y
# Add initial terms of Stirling's series
sre += tre//12; sim += tim//12;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre -= tre//360; sim -= tim//360;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre += tre//1260; sim += tim//1260;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre -= tre//1680; sim -= tim//1680;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
if abs(tre) + abs(tim) < 5: return sre, sim
sre += tre//1188; sim += tim//1188;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre -= 691*tre//360360; sim -= 691*tim//360360;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre += tre//156; sim += tim//156;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
if abs(tre) + abs(tim) < 5: return sre, sim
sre -= 3617*tre//122400; sim -= 3617*tim//122400;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre += 43867*tre//244188; sim += 43867*tim//244188;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
sre -= 174611*tre//125400; sim -= 174611*tim//125400;
tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
if abs(tre) + abs(tim) < 5: return sre, sim
k = 22
# From here on, the coefficients are growing, so we
# have to keep t at a roughly constant size
usize = bitcount(max(abs(ure), abs(uim)))
tsize = bitcount(max(abs(tre), abs(tim)))
texp = 0
while 1:
p, q, pb, qb = stirling_coefficient(k)
term_mag = tsize + pb + texp
shift = -texp
m = pb - term_mag
if m > 0 and shift < m:
p >>= m
shift -= m
m = tsize - term_mag
if m > 0 and shift < m:
wre = tre >> m
wim = tim >> m
shift -= m
else:
wre = tre
wim = tim
        termre = (wre*p//q) >> shift
        termim = (wim*p//q) >> shift
if abs(termre) + abs(termim) < 5:
break
sre += termre
sim += termim
tre, tim = ((tre*ure - tim*uim)>>usize), \
((tre*uim + tim*ure)>>usize)
texp -= (prec - usize)
k += 2
return sre, sim
def mpf_gamma(x, prec, rnd='d', type=0):
"""
This function implements multipurpose evaluation of the gamma
function, G(x), as well as the following versions of the same:
type = 0 -- G(x) [standard gamma function]
    type = 1 -- G(x+1) = x*G(x) = x! [factorial]
type = 2 -- 1/G(x) [reciprocal gamma function]
type = 3 -- log(|G(x)|) [log-gamma function, real part]
"""
    # Special values
sign, man, exp, bc = x
if not man:
if x == fzero:
if type == 1: return fone
if type == 2: return fzero
raise ValueError("gamma function pole")
if x == finf:
if type == 2: return fzero
return finf
return fnan
# First of all, for log gamma, numbers can be well beyond the fixed-point
# range, so we must take care of huge numbers before e.g. trying
# to convert x to the nearest integer
if type == 3:
wp = prec+20
if exp+bc > wp and not sign:
return mpf_sub(mpf_mul(x, mpf_log(x, wp), wp), x, prec, rnd)
# We strongly want to special-case small integers
is_integer = exp >= 0
if is_integer:
# Poles
if sign:
if type == 2:
return fzero
raise ValueError("gamma function pole")
# n = x
n = man << exp
if n < SMALL_FACTORIAL_CACHE_SIZE:
if type == 0:
return mpf_pos(small_factorial_cache[n-1], prec, rnd)
if type == 1:
return mpf_pos(small_factorial_cache[n], prec, rnd)
if type == 2:
return mpf_div(fone, small_factorial_cache[n-1], prec, rnd)
if type == 3:
return mpf_log(small_factorial_cache[n-1], prec, rnd)
else:
# floor(abs(x))
n = int(man >> (-exp))
# Estimate size and precision
# Estimate log(gamma(|x|),2) as x*log(x,2)
mag = exp + bc
gamma_size = n*mag
if type == 3:
wp = prec + 20
else:
wp = prec + bitcount(gamma_size) + 20
# Very close to 0, pole
if mag < -wp:
if type == 0:
return mpf_sub(mpf_div(fone,x, wp),mpf_shift(fone,-wp),prec,rnd)
if type == 1: return mpf_sub(fone, x, prec, rnd)
if type == 2: return mpf_add(x, mpf_shift(fone,mag-wp), prec, rnd)
if type == 3: return mpf_neg(mpf_log(mpf_abs(x), prec, rnd))
# From now on, we assume having a gamma function
if type == 1:
return mpf_gamma(mpf_add(x, fone), prec, rnd, 0)
# Special case integers (those not small enough to be caught above,
# but still small enough for an exact factorial to be faster
# than an approximate algorithm), and half-integers
if exp >= -1:
if is_integer:
if gamma_size < 10*wp:
if type == 0:
return from_int(ifac(n-1), prec, rnd)
if type == 2:
return from_rational(MPZ_ONE, ifac(n-1), prec, rnd)
if type == 3:
return mpf_log(from_int(ifac(n-1)), prec, rnd)
# half-integer
if n < 100 or gamma_size < 10*wp:
if sign:
w = sqrtpi_fixed(wp)
if n % 2: f = ifac2(2*n+1)
else: f = -ifac2(2*n+1)
if type == 0:
return mpf_shift(from_rational(w, f, prec, rnd), -wp+n+1)
if type == 2:
return mpf_shift(from_rational(f, w, prec, rnd), wp-n-1)
if type == 3:
return mpf_log(mpf_shift(from_rational(w, abs(f),
prec, rnd), -wp+n+1), prec, rnd)
elif n == 0:
if type == 0: return mpf_sqrtpi(prec, rnd)
if type == 2: return mpf_div(fone, mpf_sqrtpi(wp), prec, rnd)
if type == 3: return mpf_log(mpf_sqrtpi(wp), prec, rnd)
else:
w = sqrtpi_fixed(wp)
w = from_man_exp(w * ifac2(2*n-1), -wp-n)
if type == 0: return mpf_pos(w, prec, rnd)
if type == 2: return mpf_div(fone, w, prec, rnd)
if type == 3: return mpf_log(mpf_abs(w), prec, rnd)
# Convert to fixed point
offset = exp + wp
if offset >= 0: absxman = man << offset
else: absxman = man >> (-offset)
# For log gamma, provide accurate evaluation for x = 1+eps and 2+eps
if type == 3 and not sign:
one = MPZ_ONE << wp
one_dist = abs(absxman-one)
two_dist = abs(absxman-2*one)
cancellation = (wp - bitcount(min(one_dist, two_dist)))
if cancellation > 10:
xsub1 = mpf_sub(fone, x)
xsub2 = mpf_sub(ftwo, x)
xsub1mag = xsub1[2]+xsub1[3]
xsub2mag = xsub2[2]+xsub2[3]
if xsub1mag < -wp:
return mpf_mul(mpf_euler(wp), mpf_sub(fone, x), prec, rnd)
if xsub2mag < -wp:
return mpf_mul(mpf_sub(fone, mpf_euler(wp)),
mpf_sub(x, ftwo), prec, rnd)
# Proceed but increase precision
wp += max(-xsub1mag, -xsub2mag)
offset = exp + wp
if offset >= 0: absxman = man << offset
else: absxman = man >> (-offset)
# Use Taylor series if appropriate
n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
if n < max(100, n_for_stirling) and wp < MAX_GAMMA_TAYLOR_PREC:
if sign:
absxman = -absxman
return gamma_fixed_taylor(x, absxman, wp, prec, rnd, type)
# Use Stirling's series
# First ensure that |x| is large enough for rapid convergence
xorig = x
# Argument reduction
r = 0
if n < n_for_stirling:
r = one = MPZ_ONE << wp
d = n_for_stirling - n
for k in xrange(d):
r = (r * absxman) >> wp
absxman += one
x = xabs = from_man_exp(absxman, -wp)
if sign:
x = mpf_neg(x)
else:
xabs = mpf_abs(x)
# Asymptotic series
y = real_stirling_series(absxman, wp)
u = to_fixed(mpf_log(xabs, wp), wp)
u = ((absxman - (MPZ_ONE<<(wp-1))) * u) >> wp
y += u
w = from_man_exp(y, -wp)
# Compute final value
if sign:
# Reflection formula
A = mpf_mul(mpf_sin_pi(xorig, wp), xorig, wp)
B = mpf_neg(mpf_pi(wp))
if type == 0 or type == 2:
A = mpf_mul(A, mpf_exp(w, wp))
if r:
B = mpf_mul(B, from_man_exp(r, -wp), wp)
if type == 0:
return mpf_div(B, A, prec, rnd)
if type == 2:
return mpf_div(A, B, prec, rnd)
if type == 3:
if r:
B = mpf_mul(B, from_man_exp(r, -wp), wp)
A = mpf_add(mpf_log(mpf_abs(A), wp), w, wp)
return mpf_sub(mpf_log(mpf_abs(B), wp), A, prec, rnd)
else:
if type == 0:
if r:
return mpf_div(mpf_exp(w, wp),
from_man_exp(r, -wp), prec, rnd)
return mpf_exp(w, prec, rnd)
if type == 2:
if r:
return mpf_div(from_man_exp(r, -wp),
mpf_exp(w, wp), prec, rnd)
return mpf_exp(mpf_neg(w), prec, rnd)
if type == 3:
if r:
return mpf_sub(w, mpf_log(from_man_exp(r,-wp), wp), prec, rnd)
return mpf_pos(w, prec, rnd)
def mpc_gamma(z, prec, rnd='d', type=0):
a, b = z
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if b == fzero:
# Imaginary part on negative half-axis for log-gamma function
if type == 3 and asign:
re = mpf_gamma(a, prec, rnd, 3)
n = (-aman) >> (-aexp)
im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
return re, im
return mpf_gamma(a, prec, rnd, type), fzero
# Some kind of complex inf/nan
if (not aman and aexp) or (not bman and bexp):
return (fnan, fnan)
# Initial working precision
wp = prec + 20
amag = aexp+abc
bmag = bexp+bbc
if aman:
mag = max(amag, bmag)
else:
mag = bmag
# Close to 0
if mag < -8:
if mag < -wp:
# 1/gamma(z) = z + euler*z^2 + O(z^3)
v = mpc_add(z, mpc_mul_mpf(mpc_mul(z,z,wp),mpf_euler(wp),wp), wp)
if type == 0: return mpc_reciprocal(v, prec, rnd)
if type == 1: return mpc_div(z, v, prec, rnd)
if type == 2: return mpc_pos(v, prec, rnd)
if type == 3: return mpc_log(mpc_reciprocal(v, prec), prec, rnd)
elif type != 1:
wp += (-mag)
# Handle huge log-gamma values; must do this before converting to
# a fixed-point value. TODO: determine a precise cutoff of validity
# depending on amag and bmag
if type == 3 and mag > wp and ((not asign) or (bmag >= amag)):
return mpc_sub(mpc_mul(z, mpc_log(z, wp), wp), z, prec, rnd)
# From now on, we assume having a gamma function
if type == 1:
return mpc_gamma((mpf_add(a, fone), b), prec, rnd, 0)
an = abs(to_int(a))
bn = abs(to_int(b))
absn = max(an, bn)
gamma_size = absn*mag
if type == 3:
pass
else:
wp += bitcount(gamma_size)
# Reflect to the right half-plane. Note that Stirling's expansion
# is valid in the left half-plane too, as long as we're not too close
    # to the real axis, but in order to use this, argument reduction
    # in the negative direction would have to be implemented.
#need_reflection = asign and ((bmag < 0) or (amag-bmag > 4))
need_reflection = asign
zorig = z
if need_reflection:
z = mpc_neg(z)
asign, aman, aexp, abc = a = z[0]
bsign, bman, bexp, bbc = b = z[1]
# Imaginary part very small compared to real one?
yfinal = 0
balance_prec = 0
if bmag < -10:
# Check z ~= 1 and z ~= 2 for loggamma
if type == 3:
zsub1 = mpc_sub_mpf(z, fone)
if zsub1[0] == fzero:
cancel1 = -bmag
else:
cancel1 = -max(zsub1[0][2]+zsub1[0][3], bmag)
if cancel1 > wp:
pi = mpf_pi(wp)
x = mpc_mul_mpf(zsub1, pi, wp)
x = mpc_mul(x, x, wp)
x = mpc_div_mpf(x, from_int(12), wp)
y = mpc_mul_mpf(zsub1, mpf_neg(mpf_euler(wp)), wp)
yfinal = mpc_add(x, y, wp)
if not need_reflection:
return mpc_pos(yfinal, prec, rnd)
elif cancel1 > 0:
wp += cancel1
zsub2 = mpc_sub_mpf(z, ftwo)
if zsub2[0] == fzero:
cancel2 = -bmag
else:
cancel2 = -max(zsub2[0][2]+zsub2[0][3], bmag)
if cancel2 > wp:
pi = mpf_pi(wp)
t = mpf_sub(mpf_mul(pi, pi), from_int(6))
x = mpc_mul_mpf(mpc_mul(zsub2, zsub2, wp), t, wp)
x = mpc_div_mpf(x, from_int(12), wp)
y = mpc_mul_mpf(zsub2, mpf_sub(fone, mpf_euler(wp)), wp)
yfinal = mpc_add(x, y, wp)
if not need_reflection:
return mpc_pos(yfinal, prec, rnd)
elif cancel2 > 0:
wp += cancel2
if bmag < -wp:
# Compute directly from the real gamma function.
pp = 2*(wp+10)
aabs = mpf_abs(a)
eps = mpf_shift(fone, amag-wp)
x1 = mpf_gamma(aabs, pp, type=type)
x2 = mpf_gamma(mpf_add(aabs, eps), pp, type=type)
xprime = mpf_div(mpf_sub(x2, x1, pp), eps, pp)
y = mpf_mul(b, xprime, prec, rnd)
yfinal = (x1, y)
# Note: we still need to use the reflection formula for
# near-poles, and the correct branch of the log-gamma function
if not need_reflection:
return mpc_pos(yfinal, prec, rnd)
else:
balance_prec += (-bmag)
wp += balance_prec
n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
need_reduction = absn < n_for_stirling
afix = to_fixed(a, wp)
bfix = to_fixed(b, wp)
r = 0
if not yfinal:
zprered = z
# Argument reduction
if absn < n_for_stirling:
absn = complex(an, bn)
d = int((1 + n_for_stirling**2 - bn**2)**0.5 - an)
rre = one = MPZ_ONE << wp
rim = MPZ_ZERO
for k in xrange(d):
rre, rim = ((afix*rre-bfix*rim)>>wp), ((afix*rim + bfix*rre)>>wp)
afix += one
r = from_man_exp(rre, -wp), from_man_exp(rim, -wp)
a = from_man_exp(afix, -wp)
z = a, b
yre, yim = complex_stirling_series(afix, bfix, wp)
# (z-1/2)*log(z) + S
lre, lim = mpc_log(z, wp)
lre = to_fixed(lre, wp)
lim = to_fixed(lim, wp)
yre = ((lre*afix - lim*bfix)>>wp) - (lre>>1) + yre
yim = ((lre*bfix + lim*afix)>>wp) - (lim>>1) + yim
y = from_man_exp(yre, -wp), from_man_exp(yim, -wp)
if r and type == 3:
# If re(z) > 0 and abs(z) <= 4, the branches of loggamma(z)
# and log(gamma(z)) coincide. Otherwise, use the zeroth order
# Stirling expansion to compute the correct imaginary part.
y = mpc_sub(y, mpc_log(r, wp), wp)
zfa = to_float(zprered[0])
zfb = to_float(zprered[1])
zfabs = math.hypot(zfa,zfb)
#if not (zfa > 0.0 and zfabs <= 4):
yfb = to_float(y[1])
u = math.atan2(zfb, zfa)
if zfabs <= 0.5:
gi = 0.577216*zfb - u
else:
gi = -zfb - 0.5*u + zfa*u + zfb*math.log(zfabs)
n = int(math.floor((gi-yfb)/(2*math.pi)+0.5))
y = (y[0], mpf_add(y[1], mpf_mul_int(mpf_pi(wp), 2*n, wp), wp))
if need_reflection:
if type == 0 or type == 2:
A = mpc_mul(mpc_sin_pi(zorig, wp), zorig, wp)
B = (mpf_neg(mpf_pi(wp)), fzero)
if yfinal:
if type == 2:
A = mpc_div(A, yfinal, wp)
else:
A = mpc_mul(A, yfinal, wp)
else:
A = mpc_mul(A, mpc_exp(y, wp), wp)
if r:
B = mpc_mul(B, r, wp)
if type == 0: return mpc_div(B, A, prec, rnd)
if type == 2: return mpc_div(A, B, prec, rnd)
# Reflection formula for the log-gamma function with correct branch
# http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0006/
# LogGamma[z] == -LogGamma[-z] - Log[-z] +
# Sign[Im[z]] Floor[Re[z]] Pi I + Log[Pi] -
# Log[Sin[Pi (z - Floor[Re[z]])]] -
# Pi I (1 - Abs[Sign[Im[z]]]) Abs[Floor[Re[z]]]
if type == 3:
if yfinal:
s1 = mpc_neg(yfinal)
else:
s1 = mpc_neg(y)
# s -= log(-z)
s1 = mpc_sub(s1, mpc_log(mpc_neg(zorig), wp), wp)
# floor(re(z))
rezfloor = mpf_floor(zorig[0])
imzsign = mpf_sign(zorig[1])
pi = mpf_pi(wp)
t = mpf_mul(pi, rezfloor)
t = mpf_mul_int(t, imzsign, wp)
s1 = (s1[0], mpf_add(s1[1], t, wp))
s1 = mpc_add_mpf(s1, mpf_log(pi, wp), wp)
t = mpc_sin_pi(mpc_sub_mpf(zorig, rezfloor), wp)
t = mpc_log(t, wp)
s1 = mpc_sub(s1, t, wp)
# Note: may actually be unused, because we fall back
# to the mpf_ function for real arguments
if not imzsign:
t = mpf_mul(pi, mpf_floor(rezfloor), wp)
s1 = (s1[0], mpf_sub(s1[1], t, wp))
return mpc_pos(s1, prec, rnd)
else:
if type == 0:
if r:
return mpc_div(mpc_exp(y, wp), r, prec, rnd)
return mpc_exp(y, prec, rnd)
if type == 2:
if r:
return mpc_div(r, mpc_exp(y, wp), prec, rnd)
return mpc_exp(mpc_neg(y), prec, rnd)
if type == 3:
return mpc_pos(y, prec, rnd)
def mpf_factorial(x, prec, rnd='d'):
return mpf_gamma(x, prec, rnd, 1)
def mpc_factorial(x, prec, rnd='d'):
return mpc_gamma(x, prec, rnd, 1)
def mpf_rgamma(x, prec, rnd='d'):
return mpf_gamma(x, prec, rnd, 2)
def mpc_rgamma(x, prec, rnd='d'):
return mpc_gamma(x, prec, rnd, 2)
def mpf_loggamma(x, prec, rnd='d'):
sign, man, exp, bc = x
if sign:
raise ComplexResult
return mpf_gamma(x, prec, rnd, 3)
def mpc_loggamma(z, prec, rnd='d'):
a, b = z
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if b == fzero and asign:
re = mpf_gamma(a, prec, rnd, 3)
n = (-aman) >> (-aexp)
im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
return re, im
return mpc_gamma(z, prec, rnd, 3)
def mpf_gamma_int(n, prec, rnd=round_fast):
if n < SMALL_FACTORIAL_CACHE_SIZE:
return mpf_pos(small_factorial_cache[n-1], prec, rnd)
return mpf_gamma(from_int(n), prec, rnd)
#-----------------------------------------------------------------------#
# End of AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/
# gammazeta.py -- the next source file begins below.
#-----------------------------------------------------------------------#
import math
from bisect import bisect
from .backend import xrange
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, BACKEND
from .libmpf import (
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast,
ComplexResult,
bitcount, bctable, lshift, rshift, giant_steps, sqrt_fixed,
from_int, to_int, from_man_exp, to_fixed, to_float, from_float,
from_rational, normalize,
fzero, fone, fnone, fhalf, finf, fninf, fnan,
mpf_cmp, mpf_sign, mpf_abs,
mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_div, mpf_shift,
mpf_rdiv_int, mpf_pow_int, mpf_sqrt,
reciprocal_rnd, negative_rnd, mpf_perturb,
isqrt_fast
)
from .libintmath import ifib
#-------------------------------------------------------------------------------
# Tuning parameters
#-------------------------------------------------------------------------------
# Cutoff for computing exp from cosh+sinh. This reduces the
# number of terms by half, but also requires a square root which
# is expensive with the pure-Python square root code.
if BACKEND == 'python':
EXP_COSH_CUTOFF = 600
else:
EXP_COSH_CUTOFF = 400
# Cutoff for using more than 2 series
EXP_SERIES_U_CUTOFF = 1500
# Also basically determined by sqrt
if BACKEND == 'python':
COS_SIN_CACHE_PREC = 400
else:
COS_SIN_CACHE_PREC = 200
COS_SIN_CACHE_STEP = 8
cos_sin_cache = {}
# Number of integer logarithms to cache (for zeta sums)
MAX_LOG_INT_CACHE = 2000
log_int_cache = {}
LOG_TAYLOR_PREC = 2500 # Use Taylor series with caching up to this prec
LOG_TAYLOR_SHIFT = 9 # Cache log values in steps of size 2^-N
log_taylor_cache = {}
# prec/size ratio of x for fastest convergence in AGM formula
LOG_AGM_MAG_PREC_RATIO = 20
ATAN_TAYLOR_PREC = 3000 # Same as for log
ATAN_TAYLOR_SHIFT = 7 # steps of size 2^-N
atan_taylor_cache = {}
# ~= next power of two + 20
cache_prec_steps = [22,22]
for k in xrange(1, bitcount(LOG_TAYLOR_PREC)+1):
cache_prec_steps += [min(2**k,LOG_TAYLOR_PREC)+20] * 2**(k-1)
#----------------------------------------------------------------------------#
# #
# Elementary mathematical constants #
# #
#----------------------------------------------------------------------------#
def constant_memo(f):
"""
Decorator for caching computed values of mathematical
constants. This decorator should be applied to a
function taking a single argument prec as input and
returning a fixed-point value with the given precision.
"""
f.memo_prec = -1
f.memo_val = None
def g(prec, **kwargs):
memo_prec = f.memo_prec
if prec <= memo_prec:
return f.memo_val >> (memo_prec-prec)
newprec = int(prec*1.05+10)
f.memo_val = f(newprec, **kwargs)
f.memo_prec = newprec
return f.memo_val >> (newprec-prec)
g.__name__ = f.__name__
g.__doc__ = f.__doc__
return g
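# Editor's illustration (hypothetical helper, not part of mpmath): any
# positive fixed-point constant can be memoized the same way, e.g. sqrt(2):
@constant_memo
def _demo_sqrt2_fixed(prec):
    return sqrt_fixed(MPZ_TWO << prec, prec)
# _demo_sqrt2_fixed(53) / 2.0**53 ~= 1.4142135623730951; a second call at
# equal or lower precision is served by right-shifting the cached value.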
def def_mpf_constant(fixed):
"""
Create a function that computes the mpf value for a mathematical
constant, given a function that computes the fixed-point value.
Assumptions: the constant is positive and has magnitude ~= 1;
the fixed-point function rounds to floor.
"""
def f(prec, rnd=round_fast):
wp = prec + 20
v = fixed(wp)
if rnd in (round_up, round_ceiling):
v += 1
return normalize(0, v, -wp, bitcount(v), prec, rnd)
f.__doc__ = fixed.__doc__
return f
def bsp_acot(q, a, b, hyperbolic):
if b - a == 1:
a1 = MPZ(2*a + 3)
if hyperbolic or a&1:
return MPZ_ONE, a1 * q**2, a1
else:
return -MPZ_ONE, a1 * q**2, a1
m = (a+b)//2
p1, q1, r1 = bsp_acot(q, a, m, hyperbolic)
p2, q2, r2 = bsp_acot(q, m, b, hyperbolic)
return q2*p1 + r1*p2, q1*q2, r1*r2
# the acoth(x) series converges like the geometric series for x^2
# N = ceil(p*log(2)/(2*log(x)))
def acot_fixed(a, prec, hyperbolic):
"""
Compute acot(a) or acoth(a) for an integer a with binary splitting; see
http://numbers.computation.free.fr/Constants/Algorithms/splitting.html
"""
N = int(0.35 * prec/math.log(a) + 20)
    p, q, r = bsp_acot(a, 0, N, hyperbolic)
return ((p+q)<<prec)//(q*a)
def machin(coefs, prec, hyperbolic=False):
"""
Evaluate a Machin-like formula, i.e., a linear combination of
acot(n) or acoth(n) for specific integer values of n, using fixed-
point arithmetic. The input should be a list [(c, n), ...], giving
c*acot[h](n) + ...
"""
extraprec = 10
s = MPZ_ZERO
for a, b in coefs:
s += MPZ(a) * acot_fixed(MPZ(b), prec+extraprec, hyperbolic)
return (s >> extraprec)
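# Editor's note: for example, the classic Machin formula
# pi = 16*acot(5) - 4*acot(239) reads
#     machin([(16, 5), (-4, 239)], 53) / 2.0**53   # ~= 3.141592653589793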
# Logarithms of integers are needed for various computations involving
# logarithms, powers, radix conversion, etc
@constant_memo
def ln2_fixed(prec):
"""
Computes ln(2). This is done with a hyperbolic Machin-type formula,
with binary splitting at high precision.
"""
return machin([(18, 26), (-2, 4801), (8, 8749)], prec, True)
@constant_memo
def ln10_fixed(prec):
"""
Computes ln(10). This is done with a hyperbolic Machin-type formula.
"""
return machin([(46, 31), (34, 49), (20, 161)], prec, True)
"""
For computation of pi, we use the Chudnovsky series:
    1/(12*pi) = sum((-1)**k * (6*k)! * (A + B*k) /
                    ((3*k)! * (k!)**3 * C**(3*k + 3/2)),  k = 0..oo)
where A, B, and C are certain integer constants. This series adds roughly
14 digits per term. Note that C^(3/2) can be extracted so that the
series contains only rational terms. This makes binary splitting very
efficient.
The recurrence formulas for the binary splitting were taken from
ftp://ftp.gmplib.org/pub/src/gmp-chudnovsky.c
Previously, Machin's formula was used at low precision and the AGM iteration
was used at high precision. However, the Chudnovsky series is essentially as
fast as the Machin formula at low precision and in practice about 3x faster
than the AGM at high precision (despite theoretically having a worse
asymptotic complexity), so there is no reason not to use it in all cases.
"""
# Constants in Chudnovsky's series
CHUD_A = MPZ(13591409)
CHUD_B = MPZ(545140134)
CHUD_C = MPZ(640320)
CHUD_D = MPZ(12)
def bs_chudnovsky(a, b, level, verbose):
"""
Computes the sum from a to b of the series in the Chudnovsky
formula. Returns g, p, q where p/q is the sum as an exact
fraction and g is a temporary value used to save work
for recursive calls.
"""
if b-a == 1:
g = MPZ((6*b-5)*(2*b-1)*(6*b-1))
p = b**3 * CHUD_C**3 // 24
q = (-1)**b * g * (CHUD_A+CHUD_B*b)
else:
if verbose and level < 4:
print(" binary splitting", a, b)
mid = (a+b)//2
g1, p1, q1 = bs_chudnovsky(a, mid, level+1, verbose)
g2, p2, q2 = bs_chudnovsky(mid, b, level+1, verbose)
p = p1*p2
g = g1*g2
q = q1*p2 + q2*g1
return g, p, q
@constant_memo
def pi_fixed(prec, verbose=False, verbose_base=None):
"""
Compute floor(pi * 2**prec) as a big integer.
This is done using Chudnovsky's series (see comments in
libelefun.py for details).
"""
# The Chudnovsky series gives 14.18 digits per term
N = int(prec/3.3219280948/14.181647462 + 2)
if verbose:
print("binary splitting with N =", N)
g, p, q = bs_chudnovsky(0, N, 0, verbose)
sqrtC = isqrt_fast(CHUD_C<<(2*prec))
v = p*CHUD_C*sqrtC//((q+CHUD_A*p)*CHUD_D)
return v
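# Editor's note: for example pi_fixed(10) is 3216 (floor(pi * 2**10), up to
# 1 ulp), i.e. 3216 / 2.0**10 == 3.140625.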
def degree_fixed(prec):
return pi_fixed(prec)//180
def bspe(a, b):
"""
Sum series for exp(1)-1 between a, b, returning the result
as an exact fraction (p, q).
"""
if b-a == 1:
return MPZ_ONE, MPZ(b)
m = (a+b)//2
p1, q1 = bspe(a, m)
p2, q2 = bspe(m, b)
return p1*q2+p2, q1*q2
@constant_memo
def e_fixed(prec):
"""
Computes exp(1). This is done using the ordinary Taylor series for
exp, with binary splitting. For a description of the algorithm,
see:
http://numbers.computation.free.fr/Constants/
Algorithms/splitting.html
"""
# Slight overestimate of N needed for 1/N! < 2**(-prec)
# This could be tightened for large N.
N = int(1.1*prec/math.log(prec) + 20)
p, q = bspe(0,N)
return ((p+q)<<prec)//q
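# Editor's note: for example bspe(0, 4) returns (41, 24), since
# 1/1! + 1/2! + 1/3! + 1/4! = 41/24, so e is approximated by
# (p+q)/q = 65/24 ~= 2.7083 after just four terms.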
@constant_memo
def phi_fixed(prec):
"""
Computes the golden ratio, (1+sqrt(5))/2
"""
prec += 10
a = isqrt_fast(MPZ_FIVE<<(2*prec)) + (MPZ_ONE << prec)
return a >> 11
mpf_phi = def_mpf_constant(phi_fixed)
mpf_pi = def_mpf_constant(pi_fixed)
mpf_e = def_mpf_constant(e_fixed)
mpf_degree = def_mpf_constant(degree_fixed)
mpf_ln2 = def_mpf_constant(ln2_fixed)
mpf_ln10 = def_mpf_constant(ln10_fixed)
@constant_memo
def ln_sqrt2pi_fixed(prec):
wp = prec + 10
# ln(sqrt(2*pi)) = ln(2*pi)/2
return to_fixed(mpf_log(mpf_shift(mpf_pi(wp), 1), wp), prec-1)
@constant_memo
def sqrtpi_fixed(prec):
return sqrt_fixed(pi_fixed(prec), prec)
mpf_sqrtpi = def_mpf_constant(sqrtpi_fixed)
mpf_ln_sqrt2pi = def_mpf_constant(ln_sqrt2pi_fixed)
#----------------------------------------------------------------------------#
# #
# Powers #
# #
#----------------------------------------------------------------------------#
def mpf_pow(s, t, prec, rnd=round_fast):
"""
Compute s**t. Raises ComplexResult if s is negative and t is
fractional.
"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if ssign and texp < 0:
raise ComplexResult("negative number raised to a fractional power")
if texp >= 0:
return mpf_pow_int(s, (-1)**tsign * (tman<<texp), prec, rnd)
# s**(n/2) = sqrt(s)**n
if texp == -1:
if tman == 1:
if tsign:
return mpf_div(fone, mpf_sqrt(s, prec+10,
reciprocal_rnd[rnd]), prec, rnd)
return mpf_sqrt(s, prec, rnd)
else:
if tsign:
return mpf_pow_int(mpf_sqrt(s, prec+10,
reciprocal_rnd[rnd]), -tman, prec, rnd)
return mpf_pow_int(mpf_sqrt(s, prec+10, rnd), tman, prec, rnd)
# General formula: s**t = exp(t*log(s))
# TODO: handle rnd direction of the logarithm carefully
c = mpf_log(s, prec+10, rnd)
return mpf_exp(mpf_mul(t, c), prec, rnd)
def int_pow_fixed(y, n, prec):
"""n-th power of a fixed point number with precision prec
Returns the power in the form man, exp,
man * 2**exp ~= y**n
"""
if n == 2:
return (y*y), 0
bc = bitcount(y)
exp = 0
workprec = 2 * (prec + 4*bitcount(n) + 4)
_, pm, pe, pbc = fone
while 1:
if n & 1:
pm = pm*y
pe = pe+exp
pbc += bc - 2
pbc = pbc + bctable[int(pm >> pbc)]
if pbc > workprec:
pm = pm >> (pbc-workprec)
pe += pbc - workprec
pbc = workprec
n -= 1
if not n:
break
y = y*y
exp = exp+exp
bc = bc + bc - 2
bc = bc + bctable[int(y >> bc)]
if bc > workprec:
y = y >> (bc-workprec)
exp += bc - workprec
bc = workprec
n = n // 2
return pm, pe
# froot(s, n, prec, rnd) computes the real n-th root of a
# positive mpf tuple s.
# To compute the root we start from a 50-bit estimate for r
# generated with ordinary floating-point arithmetic, and then refine
# the value to full accuracy using the iteration
#     r_{k+1} = ((n-1)*r_k + y/r_k**(n-1)) / n
# which is simply Newton's method applied to the equation r**n = y.
# With giant_steps(start, prec+extra) = [p0,...,pm, prec+extra]
# and y = man * 2**-shift one has
# (man * 2**exp)**(1/n) =
# y**(1/n) * 2**(start-prec/n) * 2**(p0-start) * ... * 2**(prec+extra-pm) *
# 2**((exp+shift-(n-1)*prec)/n - extra)
# The last factor is accounted for in the last line of froot.
def nthroot_fixed(y, n, prec, exp1):
start = 50
try:
y1 = rshift(y, prec - n*start)
r = MPZ(int(y1**(1.0/n)))
except OverflowError:
y1 = from_int(y1, start)
fn = from_int(n)
fn = mpf_rdiv_int(1, fn, start)
r = mpf_pow(y1, fn, start)
r = to_int(r)
extra = 10
extra1 = n
prevp = start
for p in giant_steps(start, prec+extra):
pm, pe = int_pow_fixed(r, n-1, prevp)
r2 = rshift(pm, (n-1)*prevp - p - pe - extra1)
B = lshift(y, 2*p-prec+extra1)//r2
r = (B + (n-1) * lshift(r, p-prevp))//n
prevp = p
return r
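# Editor's illustration (not part of mpmath): the Newton update above in
# plain floats; each step roughly doubles the number of correct digits.
def _demo_newton_nthroot(y, n, steps=6):
    r = 1.0 + (y - 1.0) / n         # crude first-order initial estimate
    for _ in range(steps):
        r = ((n - 1) * r + y / r ** (n - 1)) / n
    return r
# _demo_newton_nthroot(2.0, 3) ~= 1.2599210498948732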
def mpf_nthroot(s, n, prec, rnd=round_fast):
"""nth-root of a positive number
Use the Newton method when faster, otherwise use x**(1/n)
"""
sign, man, exp, bc = s
if sign:
raise ComplexResult("nth root of a negative number")
if not man:
if s == fnan:
return fnan
if s == fzero:
if n > 0:
return fzero
if n == 0:
return fone
return finf
# Infinity
if not n:
return fnan
if n < 0:
return fzero
return finf
flag_inverse = False
if n < 2:
if n == 0:
return fone
if n == 1:
return mpf_pos(s, prec, rnd)
if n == -1:
return mpf_div(fone, s, prec, rnd)
# n < 0
rnd = reciprocal_rnd[rnd]
flag_inverse = True
extra_inverse = 5
prec += extra_inverse
n = -n
if n > 20 and (n >= 20000 or prec < int(233 + 28.3 * n**0.62)):
prec2 = prec + 10
fn = from_int(n)
nth = mpf_rdiv_int(1, fn, prec2)
r = mpf_pow(s, nth, prec2, rnd)
s = normalize(r[0], r[1], r[2], r[3], prec, rnd)
if flag_inverse:
return mpf_div(fone, s, prec-extra_inverse, rnd)
else:
return s
# Convert to a fixed-point number with prec2 bits.
prec2 = prec + 2*n - (prec%n)
# a few tests indicate that
# for 10 < n < 10**4 a bit more precision is needed
if n > 10:
prec2 += prec2//10
prec2 = prec2 - prec2%n
# Mantissa may have more bits than we need. Trim it down.
shift = bc - prec2
# Adjust exponents to make prec2 and exp+shift multiples of n.
sign1 = 0
es = exp+shift
if es < 0:
sign1 = 1
es = -es
if sign1:
shift += es%n
else:
shift -= es%n
man = rshift(man, shift)
extra = 10
exp1 = ((exp+shift-(n-1)*prec2)//n) - extra
rnd_shift = 0
if flag_inverse:
if rnd == 'u' or rnd == 'c':
rnd_shift = 1
else:
if rnd == 'd' or rnd == 'f':
rnd_shift = 1
man = nthroot_fixed(man+rnd_shift, n, prec2, exp1)
s = from_man_exp(man, exp1, prec, rnd)
if flag_inverse:
return mpf_div(fone, s, prec-extra_inverse, rnd)
else:
return s
def mpf_cbrt(s, prec, rnd=round_fast):
"""cubic root of a positive number"""
return mpf_nthroot(s, 3, prec, rnd)
#----------------------------------------------------------------------------#
# #
# Logarithms #
# #
#----------------------------------------------------------------------------#
def log_int_fixed(n, prec, ln2=None):
"""
Fast computation of log(n), caching the value for small n,
intended for zeta sums.
"""
if n in log_int_cache:
value, vprec = log_int_cache[n]
if vprec >= prec:
return value >> (vprec - prec)
wp = prec + 10
if wp <= LOG_TAYLOR_SHIFT:
if ln2 is None:
ln2 = ln2_fixed(wp)
r = bitcount(n)
x = n << (wp-r)
v = log_taylor_cached(x, wp) + r*ln2
else:
v = to_fixed(mpf_log(from_int(n), wp+5), wp)
if n < MAX_LOG_INT_CACHE:
log_int_cache[n] = (v, wp)
return v >> (wp-prec)
def agm_fixed(a, b, prec):
"""
Fixed-point computation of agm(a,b), assuming
a, b both close to unit magnitude.
"""
i = 0
while 1:
anew = (a+b)>>1
if i > 4 and abs(a-anew) < 8:
return a
b = isqrt_fast(a*b)
a = anew
i += 1
return a
def log_agm(x, prec):
"""
Fixed-point computation of -log(x) = log(1/x), suitable
for large precision. It is required that 0 < x < 1. The
algorithm used is the Sasaki-Kanada formula
-log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1]
For faster convergence in the theta functions, x should
be chosen closer to 0.
Guard bits must be added by the caller.
HYPOTHESIS: if x = 2^(-n), n bits need to be added to
account for the truncation to a fixed-point number,
and this is the only significant cancellation error.
The number of bits lost to roundoff is small and can be
considered constant.
[1] Richard P. Brent, "Fast Algorithms for High-Precision
Computation of Elementary Functions (extended abstract)",
http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf
"""
x2 = (x*x) >> prec
# Compute jtheta2(x)**2
s = a = b = x2
while a:
b = (b*x2) >> prec
a = (a*b) >> prec
s += a
s += (MPZ_ONE<<prec)
s = (s*s)>>(prec-2)
s = (s*isqrt_fast(x<<prec))>>prec
# Compute jtheta3(x)**2
t = a = b = x
while a:
b = (b*x2) >> prec
a = (a*b) >> prec
t += a
t = (MPZ_ONE<<prec) + (t<<1)
t = (t*t)>>prec
# Final formula
p = agm_fixed(s, t, prec)
return (pi_fixed(prec) << prec) // p
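# Editor's illustration (hypothetical helper, not part of mpmath): log_agm
# returns -log(x) as a fixed-point number, e.g. for x = 2**-4:
def _demo_log_agm(prec=64):
    x = MPZ_ONE << (prec - 4)               # x = 1/16 at precision prec
    return log_agm(x, prec) / 2.0**prec     # ~= log(16) ~= 2.7725887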
def log_taylor(x, prec, r=0):
"""
Fixed-point calculation of log(x). It is assumed that x is close
enough to 1 for the Taylor series to converge quickly. Convergence
can be improved by specifying r > 0 to compute
log(x^(1/2^r))*2^r, at the cost of performing r square roots.
The caller must provide sufficient guard bits.
"""
for i in xrange(r):
x = isqrt_fast(x<<prec)
one = MPZ_ONE << prec
v = ((x-one)<<prec)//(x+one)
sign = v < 0
if sign:
v = -v
v2 = (v*v) >> prec
v4 = (v2*v2) >> prec
s0 = v
s1 = v//3
v = (v*v4) >> prec
k = 5
while v:
s0 += v // k
k += 2
s1 += v // k
v = (v*v4) >> prec
k += 2
s1 = (s1*v2) >> prec
s = (s0+s1) << (1+r)
if sign:
return -s
return s
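# Editor's note: log_taylor sums the atanh series
#     log(x) = 2*(v + v**3/3 + v**5/5 + ...),  v = (x-1)/(x+1),
# splitting the odd terms into two interleaved sums (s0 for k = 1, 5, 9, ...
# and s1 for k = 3, 7, 11, ...) so that each pair of terms costs only one
# multiplication by v**4.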
def log_taylor_cached(x, prec):
"""
Fixed-point computation of log(x), assuming x in (0.5, 2)
and prec <= LOG_TAYLOR_PREC.
"""
n = x >> (prec-LOG_TAYLOR_SHIFT)
cached_prec = cache_prec_steps[prec]
dprec = cached_prec - prec
if (n, cached_prec) in log_taylor_cache:
a, log_a = log_taylor_cache[n, cached_prec]
else:
a = n << (cached_prec - LOG_TAYLOR_SHIFT)
log_a = log_taylor(a, cached_prec, 8)
log_taylor_cache[n, cached_prec] = (a, log_a)
a >>= dprec
log_a >>= dprec
u = ((x - a) << prec) // a
v = (u << prec) // ((MPZ_TWO << prec) + u)
v2 = (v*v) >> prec
v4 = (v2*v2) >> prec
s0 = v
s1 = v//3
v = (v*v4) >> prec
k = 5
while v:
s0 += v//k
k += 2
s1 += v//k
v = (v*v4) >> prec
k += 2
s1 = (s1*v2) >> prec
s = (s0+s1) << 1
return log_a + s
def mpf_log(x, prec, rnd=round_fast):
"""
Compute the natural logarithm of the mpf value x. If x is negative,
ComplexResult is raised.
"""
sign, man, exp, bc = x
#------------------------------------------------------------------
# Handle special values
if not man:
if x == fzero: return fninf
if x == finf: return finf
if x == fnan: return fnan
if sign:
raise ComplexResult("logarithm of a negative number")
wp = prec + 20
#------------------------------------------------------------------
    # Handle log(2^n) = n*log(2).
# Here we catch the only possible exact value, log(1) = 0
if man == 1:
if not exp:
return fzero
return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
mag = exp+bc
abs_mag = abs(mag)
#------------------------------------------------------------------
# Handle x = 1+eps, where log(x) ~ x. We need to check for
# cancellation when moving to fixed-point math and compensate
# by increasing the precision. Note that abs_mag in (0, 1) <=>
# 0.5 < x < 2 and x != 1
if abs_mag <= 1:
# Calculate t = x-1 to measure distance from 1 in bits
tsign = 1-abs_mag
if tsign:
tman = (MPZ_ONE<<bc) - man
else:
tman = man - (MPZ_ONE<<(bc-1))
tbc = bitcount(tman)
cancellation = bc - tbc
if cancellation > wp:
t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')
return mpf_perturb(t, tsign, prec, rnd)
else:
wp += cancellation
# TODO: if close enough to 1, we could use Taylor series
# even in the AGM precision range, since the Taylor series
# converges rapidly
#------------------------------------------------------------------
# Another special case:
# n*log(2) is a good enough approximation
if abs_mag > 10000:
if bitcount(abs_mag) > wp:
return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
#------------------------------------------------------------------
# General case.
# Perform argument reduction using log(x) = log(x*2^n) - n*log(2):
# If we are in the Taylor precision range, choose magnitude 0 or 1.
# If we are in the AGM precision range, choose magnitude -m for
# some large m; benchmarking on one machine showed m = prec/20 to be
# optimal between 1000 and 100,000 digits.
if wp <= LOG_TAYLOR_PREC:
m = log_taylor_cached(lshift(man, wp-bc), wp)
if mag:
m += mag*ln2_fixed(wp)
else:
optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO
n = optimal_mag - mag
x = mpf_shift(x, n)
wp += (-optimal_mag)
m = -log_agm(to_fixed(x, wp), wp)
m -= n*ln2_fixed(wp)
return from_man_exp(m, -wp, prec, rnd)
def mpf_log_hypot(a, b, prec, rnd):
"""
Computes log(sqrt(a^2+b^2)) accurately.
"""
# If either a or b is inf/nan/0, assume it to be a
if not b[1]:
a, b = b, a
# a is inf/nan/0
if not a[1]:
# both are inf/nan/0
if not b[1]:
if a == b == fzero:
return fninf
if fnan in (a, b):
return fnan
# at least one term is (+/- inf)^2
return finf
# only a is inf/nan/0
if a == fzero:
# log(sqrt(0+b^2)) = log(|b|)
return mpf_log(mpf_abs(b), prec, rnd)
if a == fnan:
return fnan
return finf
# Exact
a2 = mpf_mul(a,a)
b2 = mpf_mul(b,b)
extra = 20
# Not exact
h2 = mpf_add(a2, b2, prec+extra)
cancelled = mpf_add(h2, fnone, 10)
mag_cancelled = cancelled[2]+cancelled[3]
# Just redo the sum exactly if necessary (could be smarter
# and avoid memory allocation when a or b is precisely 1
# and the other is tiny...)
if cancelled == fzero or mag_cancelled < -extra//2:
h2 = mpf_add(a2, b2, prec+extra-min(a2[2],b2[2]))
return mpf_shift(mpf_log(h2, prec, rnd), -1)
#----------------------------------------------------------------------
# Inverse tangent
#
def atan_newton(x, prec):
if prec >= 100:
r = math.atan((x>>(prec-53))/2.0**53)
else:
r = math.atan(x/2.0**prec)
prevp = 50
r = MPZ(int(r * 2.0**53) >> (53-prevp))
extra_p = 50
for wp in giant_steps(prevp, prec):
wp += extra_p
r = r << (wp-prevp)
cos, sin = cos_sin_fixed(r, wp)
tan = (sin << wp) // cos
a = ((tan-rshift(x, prec-wp)) << wp) // ((MPZ_ONE<<wp) + ((tan**2)>>wp))
r = r - a
prevp = wp
return rshift(r, prevp-prec)
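# Editor's illustration (not part of mpmath): the update above is Newton's
# method for tan(r) = x, i.e. r <- r - (tan(r) - x)/(1 + tan(r)**2). In
# floats, starting from a deliberately crude estimate (for moderate x):
def _demo_newton_atan(x, steps=4):
    r = round(math.atan(x), 2)
    for _ in range(steps):
        t = math.tan(r)
        r -= (t - x) / (1.0 + t*t)
    return r                            # converges to math.atan(x)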
def atan_taylor_get_cached(n, prec):
# Taylor series with caching wins up to huge precisions
# To avoid unnecessary precomputation at low precision, we
# do it in steps
# Round to next power of 2
prec2 = (1<<(bitcount(prec-1))) + 20
dprec = prec2 - prec
if (n, prec2) in atan_taylor_cache:
a, atan_a = atan_taylor_cache[n, prec2]
else:
a = n << (prec2 - ATAN_TAYLOR_SHIFT)
atan_a = atan_newton(a, prec2)
atan_taylor_cache[n, prec2] = (a, atan_a)
return (a >> dprec), (atan_a >> dprec)
def atan_taylor(x, prec):
n = (x >> (prec-ATAN_TAYLOR_SHIFT))
a, atan_a = atan_taylor_get_cached(n, prec)
d = x - a
s0 = v = (d << prec) // ((a**2 >> prec) + (a*d >> prec) + (MPZ_ONE << prec))
v2 = (v**2 >> prec)
v4 = (v2 * v2) >> prec
s1 = v//3
v = (v * v4) >> prec
k = 5
while v:
s0 += v // k
k += 2
s1 += v // k
v = (v * v4) >> prec
k += 2
s1 = (s1 * v2) >> prec
s = s0 - s1
return atan_a + s
def atan_inf(sign, prec, rnd):
if not sign:
return mpf_shift(mpf_pi(prec, rnd), -1)
return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
def mpf_atan(x, prec, rnd=round_fast):
sign, man, exp, bc = x
if not man:
if x == fzero: return fzero
if x == finf: return atan_inf(0, prec, rnd)
if x == fninf: return atan_inf(1, prec, rnd)
return fnan
mag = exp + bc
# Essentially infinity
if mag > prec+20:
return atan_inf(sign, prec, rnd)
# Essentially ~ x
if -mag > prec+20:
return mpf_perturb(x, 1-sign, prec, rnd)
wp = prec + 30 + abs(mag)
# For large x, use atan(x) = pi/2 - atan(1/x)
if mag >= 2:
x = mpf_rdiv_int(1, x, wp)
reciprocal = True
else:
reciprocal = False
t = to_fixed(x, wp)
if sign:
t = -t
if wp < ATAN_TAYLOR_PREC:
a = atan_taylor(t, wp)
else:
a = atan_newton(t, wp)
if reciprocal:
a = ((pi_fixed(wp)>>1)+1) - a
if sign:
a = -a
return from_man_exp(a, -wp, prec, rnd)
# TODO: cleanup the special cases
def mpf_atan2(y, x, prec, rnd=round_fast):
xsign, xman, xexp, xbc = x
ysign, yman, yexp, ybc = y
if not yman:
if y == fzero and x != fnan:
if mpf_sign(x) >= 0:
return fzero
return mpf_pi(prec, rnd)
if y in (finf, fninf):
if x in (finf, fninf):
return fnan
# pi/2
if y == finf:
return mpf_shift(mpf_pi(prec, rnd), -1)
# -pi/2
return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
return fnan
if ysign:
return mpf_neg(mpf_atan2(mpf_neg(y), x, prec, negative_rnd[rnd]))
if not xman:
if x == fnan:
return fnan
if x == finf:
return fzero
if x == fninf:
return mpf_pi(prec, rnd)
if y == fzero:
return fzero
return mpf_shift(mpf_pi(prec, rnd), -1)
tquo = mpf_atan(mpf_div(y, x, prec+4), prec+4)
if xsign:
return mpf_add(mpf_pi(prec+4), tquo, prec, rnd)
else:
return mpf_pos(tquo, prec, rnd)
def mpf_asin(x, prec, rnd=round_fast):
sign, man, exp, bc = x
if bc+exp > 0 and x not in (fone, fnone):
raise ComplexResult("asin(x) is real only for -1 <= x <= 1")
# asin(x) = 2*atan(x/(1+sqrt(1-x**2)))
wp = prec + 15
a = mpf_mul(x, x)
b = mpf_add(fone, mpf_sqrt(mpf_sub(fone, a, wp), wp), wp)
c = mpf_div(x, b, wp)
return mpf_shift(mpf_atan(c, prec, rnd), 1)
def mpf_acos(x, prec, rnd=round_fast):
# acos(x) = 2*atan(sqrt(1-x**2)/(1+x))
sign, man, exp, bc = x
if bc + exp > 0:
if x not in (fone, fnone):
raise ComplexResult("acos(x) is real only for -1 <= x <= 1")
if x == fnone:
return mpf_pi(prec, rnd)
wp = prec + 15
a = mpf_mul(x, x)
b = mpf_sqrt(mpf_sub(fone, a, wp), wp)
c = mpf_div(b, mpf_add(fone, x, wp), wp)
return mpf_shift(mpf_atan(c, prec, rnd), 1)
def mpf_asinh(x, prec, rnd=round_fast):
wp = prec + 20
sign, man, exp, bc = x
mag = exp+bc
if mag < -8:
if mag < -wp:
return mpf_perturb(x, 1-sign, prec, rnd)
wp += (-mag)
# asinh(x) = log(x+sqrt(x**2+1))
# use reflection symmetry to avoid cancellation
q = mpf_sqrt(mpf_add(mpf_mul(x, x), fone, wp), wp)
q = mpf_add(mpf_abs(x), q, wp)
if sign:
return mpf_neg(mpf_log(q, prec, negative_rnd[rnd]))
else:
return mpf_log(q, prec, rnd)
def mpf_acosh(x, prec, rnd=round_fast):
# acosh(x) = log(x+sqrt(x**2-1))
wp = prec + 15
if mpf_cmp(x, fone) == -1:
raise ComplexResult("acosh(x) is real only for x >= 1")
q = mpf_sqrt(mpf_add(mpf_mul(x,x), fnone, wp), wp)
return mpf_log(mpf_add(x, q, wp), prec, rnd)
def mpf_atanh(x, prec, rnd=round_fast):
# atanh(x) = log((1+x)/(1-x))/2
sign, man, exp, bc = x
if (not man) and exp:
if x in (fzero, fnan):
return x
raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
mag = bc + exp
if mag > 0:
if mag == 1 and man == 1:
return [finf, fninf][sign]
raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
wp = prec + 15
if mag < -8:
if mag < -wp:
return mpf_perturb(x, sign, prec, rnd)
wp += (-mag)
a = mpf_add(x, fone, wp)
b = mpf_sub(fone, x, wp)
return mpf_shift(mpf_log(mpf_div(a, b, wp), prec, rnd), -1)
def mpf_fibonacci(x, prec, rnd=round_fast):
sign, man, exp, bc = x
if not man:
if x == fninf:
return fnan
return x
# F(2^n) ~= 2^(2^n)
size = abs(exp+bc)
if exp >= 0:
# Exact
if size < 10 or size <= bitcount(prec):
return from_int(ifib(to_int(x)), prec, rnd)
# Use the modified Binet formula
wp = prec + size + 20
a = mpf_phi(wp)
b = mpf_add(mpf_shift(a, 1), fnone, wp)
u = mpf_pow(a, x, wp)
v = mpf_cos_pi(x, wp)
v = mpf_div(v, u, wp)
u = mpf_sub(u, v, wp)
u = mpf_div(u, b, prec, rnd)
return u
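# Editor's illustration (not part of mpmath): the modified Binet formula used
# above, F(x) = (phi**x - cos(pi*x)/phi**x) / (2*phi - 1), in floats:
def _demo_binet(x):
    phi = (1.0 + 5.0**0.5) / 2.0
    return (phi**x - math.cos(math.pi * x) / phi**x) / (2.0*phi - 1.0)
# _demo_binet(10) ~= 55.0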
#-------------------------------------------------------------------------------
# Exponential-type functions
#-------------------------------------------------------------------------------
def exponential_series(x, prec, type=0):
"""
Taylor series for cosh/sinh or cos/sin.
type = 0 -- returns exp(x) (slightly faster than cosh+sinh)
type = 1 -- returns (cosh(x), sinh(x))
type = 2 -- returns (cos(x), sin(x))
"""
if x < 0:
x = -x
sign = 1
else:
sign = 0
r = int(0.5*prec**0.5)
xmag = bitcount(x) - prec
r = max(0, xmag + r)
extra = 10 + 2*max(r,-xmag)
wp = prec + extra
x <<= (extra - r)
one = MPZ_ONE << wp
alt = (type == 2)
if prec < EXP_SERIES_U_CUTOFF:
x2 = a = (x*x) >> wp
x4 = (x2*x2) >> wp
s0 = s1 = MPZ_ZERO
k = 2
while a:
a //= (k-1)*k; s0 += a; k += 2
a //= (k-1)*k; s1 += a; k += 2
a = (a*x4) >> wp
s1 = (x2*s1) >> wp
if alt:
c = s1 - s0 + one
else:
c = s1 + s0 + one
else:
u = int(0.3*prec**0.35)
x2 = a = (x*x) >> wp
xpowers = [one, x2]
for i in xrange(1, u):
xpowers.append((xpowers[-1]*x2)>>wp)
sums = [MPZ_ZERO] * u
k = 2
while a:
for i in xrange(u):
a //= (k-1)*k
if alt and k & 2: sums[i] -= a
else: sums[i] += a
k += 2
a = (a*xpowers[-1]) >> wp
for i in xrange(1, u):
sums[i] = (sums[i]*xpowers[i]) >> wp
c = sum(sums) + one
if type == 0:
s = isqrt_fast(c*c - (one<<wp))
if sign:
v = c - s
else:
v = c + s
for i in xrange(r):
v = (v*v) >> wp
return v >> extra
else:
# Repeatedly apply the double-angle formula
# cosh(2*x) = 2*cosh(x)^2 - 1
# cos(2*x) = 2*cos(x)^2 - 1
pshift = wp-1
for i in xrange(r):
c = ((c*c) >> pshift) - one
# With the abs, this is the same for sinh and sin
s = isqrt_fast(abs((one<<wp) - c*c))
if sign:
s = -s
return (c>>extra), (s>>extra)
def exp_basecase(x, prec):
"""
Compute exp(x) as a fixed-point number. Works for any x,
but for speed should have |x| < 1. For an arbitrary number,
use exp(x) = exp(x-m*log(2)) * 2^m where m = floor(x/log(2)).
"""
if prec > EXP_COSH_CUTOFF:
return exponential_series(x, prec, 0)
r = int(prec**0.5)
prec += r
s0 = s1 = (MPZ_ONE << prec)
k = 2
a = x2 = (x*x) >> prec
while a:
a //= k; s0 += a; k += 1
a //= k; s1 += a; k += 1
a = (a*x2) >> prec
s1 = (s1*x) >> prec
s = s0 + s1
u = r
while r:
s = (s*s) >> prec
r -= 1
return s >> u
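# Illustrative sketch (not part of the library): the halve-and-square scheme
# behind exp_basecase, in plain floats. Dividing the argument by 2**r makes
# the Taylor series converge much faster; squaring the result r times undoes
# the reduction. Hypothetical helper for exposition only.
def _demo_exp_halve_and_square(x, r=8, terms=12):
    t = x / 2.0**r
    s = a = 1.0
    for k in range(1, terms):   # Taylor series for exp(t), small t
        a *= t / k
        s += a
    for _ in range(r):          # exp(t)**(2**r) == exp(x)
        s *= s
    return s
# _demo_exp_halve_and_square(1.0) ~= 2.718281828459045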
def exp_expneg_basecase(x, prec):
"""
Computation of exp(x), exp(-x)
"""
if prec > EXP_COSH_CUTOFF:
cosh, sinh = exponential_series(x, prec, 1)
return cosh+sinh, cosh-sinh
a = exp_basecase(x, prec)
b = (MPZ_ONE << (prec+prec)) // a
return a, b
def cos_sin_basecase(x, prec):
"""
Compute cos(x), sin(x) as fixed-point numbers, assuming x
in [0, pi/2). For an arbitrary number, use x' = x - m*(pi/2)
where m = floor(x/(pi/2)) along with quarter-period symmetries.
"""
if prec > COS_SIN_CACHE_PREC:
return exponential_series(x, prec, 2)
precs = prec - COS_SIN_CACHE_STEP
t = x >> precs
n = int(t)
if n not in cos_sin_cache:
w = t<<(10+COS_SIN_CACHE_PREC-COS_SIN_CACHE_STEP)
cos_t, sin_t = exponential_series(w, 10+COS_SIN_CACHE_PREC, 2)
cos_sin_cache[n] = (cos_t>>10), (sin_t>>10)
cos_t, sin_t = cos_sin_cache[n]
offset = COS_SIN_CACHE_PREC - prec
cos_t >>= offset
sin_t >>= offset
x -= t << precs
cos = MPZ_ONE << prec
sin = x
k = 2
a = -((x*x) >> prec)
while a:
a //= k; cos += a; k += 1; a = (a*x) >> prec
a //= k; sin += a; k += 1; a = -((a*x) >> prec)
return ((cos*cos_t-sin*sin_t) >> prec), ((sin*cos_t+cos*sin_t) >> prec)
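# For exposition: t above holds the leading COS_SIN_CACHE_STEP bits of x,
# whose cos/sin come from the cache; the remainder x - t is tiny, so its
# series converges in very few terms, and the two parts are combined with
# the angle-addition formulas
#     cos(t + r) = cos(t)*cos(r) - sin(t)*sin(r)
#     sin(t + r) = sin(t)*cos(r) + cos(t)*sin(r)
# which is exactly the return line above.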
def mpf_exp(x, prec, rnd=round_fast):
sign, man, exp, bc = x
if man:
mag = bc + exp
wp = prec + 14
if sign:
man = -man
# TODO: the best cutoff depends on both x and the precision.
if prec > 600 and exp >= 0:
# Need about log2(exp(n)) ~= 1.45*mag extra precision
e = mpf_e(wp+int(1.45*mag))
return mpf_pow_int(e, man<<exp, prec, rnd)
if mag < -wp:
return mpf_perturb(fone, sign, prec, rnd)
# |x| >= 2
if mag > 1:
# For large arguments: exp(2^mag*(1+eps)) =
# exp(2^mag)*exp(2^mag*eps) = exp(2^mag)*(1 + 2^mag*eps + ...)
# so about mag extra bits is required.
wpmod = wp + mag
offset = exp + wpmod
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
lg2 = ln2_fixed(wpmod)
n, t = divmod(t, lg2)
n = int(n)
t >>= mag
else:
offset = exp + wp
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
n = 0
man = exp_basecase(t, wp)
return from_man_exp(man, n-wp, prec, rnd)
if not exp:
return fone
if x == fninf:
return fzero
return x
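# Illustrative sketch (not part of the library): the divmod-by-log(2)
# reduction used above, in plain floats: exp(x) = exp(t) * 2**n with
# n = floor(x/log(2)) and t = x - n*log(2) in [0, log(2)). Hypothetical
# helper for exposition only.
def _demo_exp_reduction(x):
    import math
    n = int(math.floor(x / math.log(2)))
    t = x - n*math.log(2)
    return math.ldexp(math.exp(t), n)
# _demo_exp_reduction(10.0) ~= 22026.465794806718 == math.exp(10.0)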
def mpf_cosh_sinh(x, prec, rnd=round_fast, tanh=0):
"""Simultaneously compute (cosh(x), sinh(x)) for real x"""
sign, man, exp, bc = x
if (not man) and exp:
if tanh:
if x == finf: return fone
if x == fninf: return fnone
return fnan
if x == finf: return (finf, finf)
if x == fninf: return (finf, fninf)
return fnan, fnan
mag = exp+bc
wp = prec+14
if mag < -4:
# Extremely close to 0, sinh(x) ~= x and cosh(x) ~= 1
if mag < -wp:
if tanh:
return mpf_perturb(x, 1-sign, prec, rnd)
cosh = mpf_perturb(fone, 0, prec, rnd)
sinh = mpf_perturb(x, sign, prec, rnd)
return cosh, sinh
# Fix for cancellation when computing sinh
wp += (-mag)
# Does exp(-2*x) vanish?
if mag > 10:
if 3*(1<<(mag-1)) > wp:
# XXX: rounding
if tanh:
return mpf_perturb([fone,fnone][sign], 1-sign, prec, rnd)
c = s = mpf_shift(mpf_exp(mpf_abs(x), prec, rnd), -1)
if sign:
s = mpf_neg(s)
return c, s
# |x| > 1
if mag > 1:
wpmod = wp + mag
offset = exp + wpmod
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
lg2 = ln2_fixed(wpmod)
n, t = divmod(t, lg2)
n = int(n)
t >>= mag
else:
offset = exp + wp
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
n = 0
a, b = exp_expneg_basecase(t, wp)
# TODO: optimize division precision
cosh = a + (b>>(2*n))
sinh = a - (b>>(2*n))
if sign:
sinh = -sinh
if tanh:
man = (sinh << wp) // cosh
return from_man_exp(man, -wp, prec, rnd)
else:
cosh = from_man_exp(cosh, n-wp-1, prec, rnd)
sinh = from_man_exp(sinh, n-wp-1, prec, rnd)
return cosh, sinh
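# For exposition: with |x| = t + n*log(2) as reduced above, exp(|x|) = a*2**n
# and exp(-|x|) = b*2**(-n) where a, b = exp_expneg_basecase(t, wp). Hence
# cosh(x) = (a*2**n + b*2**(-n))/2, which is the (b >> (2*n)) correction and
# the final n-wp-1 exponent shift in the code above.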
def mod_pi2(man, exp, mag, wp):
# Reduce to standard interval
if mag > 0:
i = 0
while 1:
cancellation_prec = 20 << i
wpmod = wp + mag + cancellation_prec
pi2 = pi_fixed(wpmod-1)
pi4 = pi2 >> 1
offset = wpmod + exp
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
n, y = divmod(t, pi2)
if y > pi4:
small = pi2 - y
else:
small = y
if small >> (wp+mag-10):
n = int(n)
t = y >> mag
wp = wpmod - mag
break
i += 1
else:
wp += (-mag)
offset = exp + wp
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
n = 0
return t, n, wp
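# Illustrative sketch (not part of the library): the reduction performed by
# mod_pi2 in plain floats -- write x = n*(pi/2) + y, after which only the
# quadrant n & 3 and the small remainder y are needed. The retry loop above
# exists because y suffers catastrophic cancellation when x is close to a
# multiple of pi/2; fixed-point arithmetic repairs this by retrying with
# more bits of pi, which plain floats cannot do.
def _demo_mod_pi2(x):
    import math
    n, y = divmod(x, math.pi/2)
    return int(n) & 3, y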
def mpf_cos_sin(x, prec, rnd=round_fast, which=0, pi=False):
"""
which:
0 -- return cos(x), sin(x)
1 -- return cos(x)
2 -- return sin(x)
3 -- return tan(x)
if pi=True, compute for pi*x
"""
sign, man, exp, bc = x
if not man:
if exp:
c, s = fnan, fnan
else:
c, s = fone, fzero
if which == 0: return c, s
if which == 1: return c
if which == 2: return s
if which == 3: return s
mag = bc + exp
wp = prec + 10
# Extremely small?
if mag < 0:
if mag < -wp:
if pi:
x = mpf_mul(x, mpf_pi(wp))
c = mpf_perturb(fone, 1, prec, rnd)
s = mpf_perturb(x, 1-sign, prec, rnd)
if which == 0: return c, s
if which == 1: return c
if which == 2: return s
if which == 3: return mpf_perturb(x, sign, prec, rnd)
if pi:
if exp >= -1:
if exp == -1:
c = fzero
s = (fone, fnone)[bool(man & 2) ^ sign]
elif exp == 0:
c, s = (fnone, fzero)
else:
c, s = (fone, fzero)
if which == 0: return c, s
if which == 1: return c
if which == 2: return s
if which == 3: return mpf_div(s, c, prec, rnd)
# Subtract nearest half-integer (= mod by pi/2)
n = ((man >> (-exp-2)) + 1) >> 1
man = man - (n << (-exp-1))
mag2 = bitcount(man) + exp
wp = prec + 10 - mag2
offset = exp + wp
if offset >= 0:
t = man << offset
else:
t = man >> (-offset)
t = (t*pi_fixed(wp)) >> wp
else:
t, n, wp = mod_pi2(man, exp, mag, wp)
c, s = cos_sin_basecase(t, wp)
m = n & 3
if m == 1: c, s = -s, c
elif m == 2: c, s = -c, -s
elif m == 3: c, s = s, -c
if sign:
s = -s
if which == 0:
c = from_man_exp(c, -wp, prec, rnd)
s = from_man_exp(s, -wp, prec, rnd)
return c, s
if which == 1:
return from_man_exp(c, -wp, prec, rnd)
if which == 2:
return from_man_exp(s, -wp, prec, rnd)
if which == 3:
return from_rational(s, c, prec, rnd)
def mpf_cos(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1)
def mpf_sin(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2)
def mpf_tan(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 3)
def mpf_cos_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 0, 1)
def mpf_cos_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1, 1)
def mpf_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2, 1)
def mpf_cosh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[0]
def mpf_sinh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[1]
def mpf_tanh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd, tanh=1)
# Low-overhead fixed-point versions
def cos_sin_fixed(x, prec, pi2=None):
if pi2 is None:
pi2 = pi_fixed(prec-1)
n, t = divmod(x, pi2)
n = int(n)
c, s = cos_sin_basecase(t, prec)
m = n & 3
if m == 0: return c, s
if m == 1: return -s, c
if m == 2: return -c, -s
if m == 3: return s, -c
def exp_fixed(x, prec, ln2=None):
if ln2 is None:
ln2 = ln2_fixed(prec)
n, t = divmod(x, ln2)
n = int(n)
v = exp_basecase(t, prec)
if n >= 0:
return v << n
else:
return v >> (-n)
if BACKEND == 'sage':
try:
import sage.libs.mpmath.ext_libmp as _lbmp
mpf_sqrt = _lbmp.mpf_sqrt
mpf_exp = _lbmp.mpf_exp
mpf_log = _lbmp.mpf_log
mpf_cos = _lbmp.mpf_cos
mpf_sin = _lbmp.mpf_sin
mpf_pow = _lbmp.mpf_pow
exp_fixed = _lbmp.exp_fixed
cos_sin_fixed = _lbmp.cos_sin_fixed
log_int_fixed = _lbmp.log_int_fixed
except (ImportError, AttributeError):
print("Warning: Sage imports in libelefun failed")
#-------------------------------------------------------------------------------
# End of file: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/libelefun.py
# Begin file: libhyper.py
#-------------------------------------------------------------------------------
import operator
import math
from .backend import MPZ_ZERO, MPZ_ONE, BACKEND, xrange, exec_
from .libintmath import gcd
from .libmpf import (\
ComplexResult, round_fast, round_nearest,
negative_rnd, bitcount, to_fixed, from_man_exp, from_int, to_int,
from_rational,
fzero, fone, fnone, ftwo, finf, fninf, fnan,
mpf_sign, mpf_add, mpf_abs, mpf_pos,
mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_min_max,
mpf_perturb, mpf_neg, mpf_shift, mpf_sub, mpf_mul, mpf_div,
sqrt_fixed, mpf_sqrt, mpf_rdiv_int, mpf_pow_int,
to_rational,
)
from .libelefun import (\
mpf_pi, mpf_exp, mpf_log, pi_fixed, mpf_cos_sin, mpf_cos, mpf_sin,
mpf_sqrt, agm_fixed,
)
from .libmpc import (\
mpc_one, mpc_sub, mpc_mul_mpf, mpc_mul, mpc_neg, complex_int_pow,
mpc_div, mpc_add_mpf, mpc_sub_mpf,
mpc_log, mpc_add, mpc_pos, mpc_shift,
mpc_is_infnan, mpc_zero, mpc_sqrt, mpc_abs,
mpc_mpf_div, mpc_square, mpc_exp
)
from .libintmath import ifac
from .gammazeta import mpf_gamma_int, mpf_euler, euler_fixed
class NoConvergence(Exception):
pass
#-----------------------------------------------------------------------#
# #
# Generic hypergeometric series #
# #
#-----------------------------------------------------------------------#
"""
TODO:
1. proper mpq parsing
2. imaginary z special-cased (also: rational, integer?)
3. more clever handling of series that don't converge because of stupid
upwards rounding
4. checking for cancellation
"""
def make_hyp_summator(key):
"""
Returns a function that sums a generalized hypergeometric series,
for given parameter types (integer, rational, real, complex).
"""
p, q, param_types, ztype = key
pstring = "".join(param_types)
fname = "hypsum_%i_%i_%s_%s_%s" % (p, q, pstring[:p], pstring[p:], ztype)
#print "generating hypsum", fname
have_complex_param = 'C' in param_types
have_complex_arg = ztype == 'C'
have_complex = have_complex_param or have_complex_arg
source = []
add = source.append
aint = []
arat = []
bint = []
brat = []
areal = []
breal = []
acomplex = []
bcomplex = []
#add("wp = prec + 40")
add("MAX = kwargs.get('maxterms', wp*100)")
add("HIGH = MPZ_ONE<<epsshift")
add("LOW = -HIGH")
# Setup code
add("SRE = PRE = one = (MPZ_ONE << wp)")
if have_complex:
add("SIM = PIM = MPZ_ZERO")
if have_complex_arg:
add("xsign, xm, xe, xbc = z[0]")
add("if xsign: xm = -xm")
add("ysign, ym, ye, ybc = z[1]")
add("if ysign: ym = -ym")
else:
add("xsign, xm, xe, xbc = z")
add("if xsign: xm = -xm")
add("offset = xe + wp")
add("if offset >= 0:")
add(" ZRE = xm << offset")
add("else:")
add(" ZRE = xm >> (-offset)")
if have_complex_arg:
add("offset = ye + wp")
add("if offset >= 0:")
add(" ZIM = ym << offset")
add("else:")
add(" ZIM = ym >> (-offset)")
for i, flag in enumerate(param_types):
W = ["A", "B"][i >= p]
if flag == 'Z':
([aint,bint][i >= p]).append(i)
add("%sINT_%i = coeffs[%i]" % (W, i, i))
elif flag == 'Q':
([arat,brat][i >= p]).append(i)
add("%sP_%i, %sQ_%i = coeffs[%i]._mpq_" % (W, i, W, i, i))
elif flag == 'R':
([areal,breal][i >= p]).append(i)
add("xsign, xm, xe, xbc = coeffs[%i]._mpf_" % i)
add("if xsign: xm = -xm")
add("offset = xe + wp")
add("if offset >= 0:")
add(" %sREAL_%i = xm << offset" % (W, i))
add("else:")
add(" %sREAL_%i = xm >> (-offset)" % (W, i))
elif flag == 'C':
([acomplex,bcomplex][i >= p]).append(i)
add("__re, __im = coeffs[%i]._mpc_" % i)
add("xsign, xm, xe, xbc = __re")
add("if xsign: xm = -xm")
add("ysign, ym, ye, ybc = __im")
add("if ysign: ym = -ym")
add("offset = xe + wp")
add("if offset >= 0:")
add(" %sCRE_%i = xm << offset" % (W, i))
add("else:")
add(" %sCRE_%i = xm >> (-offset)" % (W, i))
add("offset = ye + wp")
add("if offset >= 0:")
add(" %sCIM_%i = ym << offset" % (W, i))
add("else:")
add(" %sCIM_%i = ym >> (-offset)" % (W, i))
else:
raise ValueError
l_areal = len(areal)
l_breal = len(breal)
cancellable_real = min(l_areal, l_breal)
noncancellable_real_num = areal[cancellable_real:]
noncancellable_real_den = breal[cancellable_real:]
# LOOP
add("for n in xrange(1,10**8):")
add(" if n in magnitude_check:")
add(" p_mag = bitcount(abs(PRE))")
if have_complex:
add(" p_mag = max(p_mag, bitcount(abs(PIM)))")
add(" magnitude_check[n] = wp-p_mag")
# Real factors
multiplier = " * ".join(["AINT_#".replace("#", str(i)) for i in aint] + \
["AP_#".replace("#", str(i)) for i in arat] + \
["BQ_#".replace("#", str(i)) for i in brat])
divisor = " * ".join(["BINT_#".replace("#", str(i)) for i in bint] + \
["BP_#".replace("#", str(i)) for i in brat] + \
["AQ_#".replace("#", str(i)) for i in arat] + ["n"])
if multiplier:
add(" mul = " + multiplier)
add(" div = " + divisor)
# Check for singular terms
add(" if not div:")
if multiplier:
add(" if not mul:")
add(" break")
add(" raise ZeroDivisionError")
# Update product
if have_complex:
# TODO: when there are several real parameters and just a few complex
# (maybe just the complex argument), we only need to do about
# half as many ops if we accumulate the real factor in a single real variable
for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k]))
for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i)))
for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i)))
for k in range(cancellable_real): add(" PIM = PIM * AREAL_%i // BREAL_%i" % (areal[k], breal[k]))
for i in noncancellable_real_num: add(" PIM = (PIM * AREAL_#) >> wp".replace("#", str(i)))
for i in noncancellable_real_den: add(" PIM = (PIM << wp) // BREAL_#".replace("#", str(i)))
if multiplier:
if have_complex_arg:
add(" PRE, PIM = (mul*(PRE*ZRE-PIM*ZIM))//div, (mul*(PIM*ZRE+PRE*ZIM))//div")
add(" PRE >>= wp")
add(" PIM >>= wp")
else:
add(" PRE = ((mul * PRE * ZRE) >> wp) // div")
add(" PIM = ((mul * PIM * ZRE) >> wp) // div")
else:
if have_complex_arg:
add(" PRE, PIM = (PRE*ZRE-PIM*ZIM)//div, (PIM*ZRE+PRE*ZIM)//div")
add(" PRE >>= wp")
add(" PIM >>= wp")
else:
add(" PRE = ((PRE * ZRE) >> wp) // div")
add(" PIM = ((PIM * ZRE) >> wp) // div")
for i in acomplex:
add(" PRE, PIM = PRE*ACRE_#-PIM*ACIM_#, PIM*ACRE_#+PRE*ACIM_#".replace("#", str(i)))
add(" PRE >>= wp")
add(" PIM >>= wp")
for i in bcomplex:
add(" mag = BCRE_#*BCRE_#+BCIM_#*BCIM_#".replace("#", str(i)))
add(" re = PRE*BCRE_# + PIM*BCIM_#".replace("#", str(i)))
add(" im = PIM*BCRE_# - PRE*BCIM_#".replace("#", str(i)))
add(" PRE = (re << wp) // mag".replace("#", str(i)))
add(" PIM = (im << wp) // mag".replace("#", str(i)))
else:
for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k]))
for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i)))
for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i)))
if multiplier:
add(" PRE = ((PRE * mul * ZRE) >> wp) // div")
else:
add(" PRE = ((PRE * ZRE) >> wp) // div")
# Add product to sum
if have_complex:
add(" SRE += PRE")
add(" SIM += PIM")
add(" if (HIGH > PRE > LOW) and (HIGH > PIM > LOW):")
add(" break")
else:
add(" SRE += PRE")
add(" if HIGH > PRE > LOW:")
add(" break")
#add(" from mpmath import nprint, log, ldexp")
#add(" nprint([n, log(abs(PRE),2), ldexp(PRE,-wp)])")
add(" if n > MAX:")
add(" raise NoConvergence('Hypergeometric series converges too slowly. Try increasing maxterms.')")
# +1 all parameters for next loop
for i in aint: add(" AINT_# += 1".replace("#", str(i)))
for i in bint: add(" BINT_# += 1".replace("#", str(i)))
for i in arat: add(" AP_# += AQ_#".replace("#", str(i)))
for i in brat: add(" BP_# += BQ_#".replace("#", str(i)))
for i in areal: add(" AREAL_# += one".replace("#", str(i)))
for i in breal: add(" BREAL_# += one".replace("#", str(i)))
for i in acomplex: add(" ACRE_# += one".replace("#", str(i)))
for i in bcomplex: add(" BCRE_# += one".replace("#", str(i)))
if have_complex:
add("a = from_man_exp(SRE, -wp, prec, 'n')")
add("b = from_man_exp(SIM, -wp, prec, 'n')")
add("if SRE:")
add(" if SIM:")
add(" magn = max(a[2]+a[3], b[2]+b[3])")
add(" else:")
add(" magn = a[2]+a[3]")
add("elif SIM:")
add(" magn = b[2]+b[3]")
add("else:")
add(" magn = -wp+1")
add("return (a, b), True, magn")
else:
add("a = from_man_exp(SRE, -wp, prec, 'n')")
add("if SRE:")
add(" magn = a[2]+a[3]")
add("else:")
add(" magn = -wp+1")
add("return a, False, magn")
source = "\n".join((" " + line) for line in source)
source = ("def %s(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs):\n" % fname) + source
namespace = {}
exec_(source, globals(), namespace)
#print source
return source, namespace[fname]
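# Illustrative sketch (not part of the library) of how a generated summator
# might be invoked; the concrete values below are assumptions for exposition,
# and the real call sites live in mpmath's higher-level hypsum machinery:
#
#   # 1F1 with two real (mpf) parameters and a real (raw mpf) argument
#   source, summator = make_hyp_summator((1, 1, ('R', 'R'), 'R'))
#   value, is_complex, magnitude = summator(
#       coeffs, z._mpf_, prec, wp, epsshift, {}, maxterms=wp*100)
#
# Here coeffs is a list of mpf parameters, epsshift sets the HIGH/LOW
# termination bounds, and the empty dict disables the magnitude checks.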
if BACKEND == 'sage':
def make_hyp_summator(key):
"""
Returns a function that sums a generalized hypergeometric series,
for given parameter types (integer, rational, real, complex).
"""
from sage.libs.mpmath.ext_main import hypsum_internal
p, q, param_types, ztype = key
def _hypsum(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs):
return hypsum_internal(p, q, param_types, ztype, coeffs, z,
prec, wp, epsshift, magnitude_check, kwargs)
return "(none)", _hypsum
#-----------------------------------------------------------------------#
# #
# Error functions #
# #
#-----------------------------------------------------------------------#
# TODO: mpf_erf should call mpf_erfc when appropriate (currently
# only the converse delegation is implemented)
def mpf_erf(x, prec, rnd=round_fast):
sign, man, exp, bc = x
if not man:
if x == fzero: return fzero
if x == finf: return fone
if x== fninf: return fnone
return fnan
size = exp + bc
lg = math.log
# The approximation erf(x) = 1 is accurate to > x^2 * log(e,2) bits
if size > 3 and 2*(size-1) + 0.528766 > lg(prec,2):
if sign:
return mpf_perturb(fnone, 0, prec, rnd)
else:
return mpf_perturb(fone, 1, prec, rnd)
# erf(x) ~ 2*x/sqrt(pi) close to 0
if size < -prec:
# 2*x
x = mpf_shift(x,1)
c = mpf_sqrt(mpf_pi(prec+20), prec+20)
# TODO: interval rounding
return mpf_div(x, c, prec, rnd)
wp = prec + abs(size) + 25
# Taylor series for erf, fixed-point summation
t = abs(to_fixed(x, wp))
t2 = (t*t) >> wp
s, term, k = t, 12345, 1
while term:
t = ((t * t2) >> wp) // k
term = t // (2*k+1)
if k & 1:
s -= term
else:
s += term
k += 1
s = (s << (wp+1)) // sqrt_fixed(pi_fixed(wp), wp)
if sign:
s = -s
return from_man_exp(s, -wp, prec, rnd)
# If possible, we use the asymptotic series for erfc.
# This is an alternating divergent asymptotic series, so
# the error is at most equal to the first omitted term.
# Here we check if the smallest term is small enough
# for a given x and precision
def erfc_check_series(x, prec):
n = to_int(x)
    return n**2 * 1.44 > prec
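# Worked example: the smallest term of the asymptotic series is of order
# exp(-x**2) = 2**(-x**2*log2(e)) ~= 2**(-1.44*x**2), so the series can give
# prec bits only when 1.44*x**2 > prec. At prec = 53 this requires
# |x| >~ sqrt(53/1.44) ~= 6; below that, mpf_erfc falls back on the erf
# series with extra working precision.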
def mpf_erfc(x, prec, rnd=round_fast):
sign, man, exp, bc = x
if not man:
if x == fzero: return fone
if x == finf: return fzero
if x == fninf: return ftwo
return fnan
wp = prec + 20
mag = bc+exp
# Preserve full accuracy when exponent grows huge
wp += max(0, 2*mag)
regular_erf = sign or mag < 2
if regular_erf or not erfc_check_series(x, wp):
if regular_erf:
return mpf_sub(fone, mpf_erf(x, prec+10, negative_rnd[rnd]), prec, rnd)
# 1-erf(x) ~ exp(-x^2), increase prec to deal with cancellation
n = to_int(x)+1
return mpf_sub(fone, mpf_erf(x, prec + int(n**2*1.44) + 10), prec, rnd)
s = term = MPZ_ONE << wp
term_prev = 0
t = (2 * to_fixed(x, wp) ** 2) >> wp
k = 1
while 1:
term = ((term * (2*k - 1)) << wp) // t
        if (k > 4 and term > term_prev) or not term:
break
if k & 1:
s -= term
else:
s += term
term_prev = term
#print k, to_str(from_man_exp(term, -wp, 50), 10)
k += 1
s = (s << wp) // sqrt_fixed(pi_fixed(wp), wp)
s = from_man_exp(s, -wp, wp)
z = mpf_exp(mpf_neg(mpf_mul(x,x,wp),wp),wp)
y = mpf_div(mpf_mul(z, s, wp), x, prec, rnd)
return y
#-----------------------------------------------------------------------#
# #
# Exponential integrals #
# #
#-----------------------------------------------------------------------#
def ei_taylor(x, prec):
s = t = x
k = 2
while t:
t = ((t*x) >> prec) // k
s += t // k
k += 1
return s
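# Illustrative sketch (not part of the library): the same series in plain
# floats, completed to the full exponential integral via
# Ei(x) = euler + log|x| + sum_{k>=1} x**k/(k*k!). Hypothetical helper for
# exposition only.
def _demo_ei(x, terms=40):
    import math
    s = t = x
    for k in range(2, terms):
        t *= x / k      # t = x**k / k!
        s += t / k      # add x**k / (k*k!)
    return 0.5772156649015329 + math.log(abs(x)) + s
# _demo_ei(1.0) ~= 1.8951178163559368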
def complex_ei_taylor(zre, zim, prec):
_abs = abs
sre = tre = zre
sim = tim = zim
k = 2
while _abs(tre) + _abs(tim) > 5:
tre, tim = ((tre*zre-tim*zim)//k)>>prec, ((tre*zim+tim*zre)//k)>>prec
sre += tre // k
sim += tim // k
k += 1
return sre, sim
def ei_asymptotic(x, prec):
one = MPZ_ONE << prec
x = t = ((one << prec) // x)
s = one + x
k = 2
while t:
t = (k*t*x) >> prec
s += t
k += 1
return s
def complex_ei_asymptotic(zre, zim, prec):
_abs = abs
one = MPZ_ONE << prec
M = (zim*zim + zre*zre) >> prec
# 1 / z
xre = tre = (zre << prec) // M
xim = tim = ((-zim) << prec) // M
sre = one + xre
sim = xim
k = 2
while _abs(tre) + _abs(tim) > 1000:
#print tre, tim
tre, tim = ((tre*xre-tim*xim)*k)>>prec, ((tre*xim+tim*xre)*k)>>prec
sre += tre
sim += tim
k += 1
if k > prec:
raise NoConvergence
return sre, sim
def mpf_ei(x, prec, rnd=round_fast, e1=False):
if e1:
x = mpf_neg(x)
sign, man, exp, bc = x
if e1 and not sign:
if x == fzero:
return finf
raise ComplexResult("E1(x) for x < 0")
if man:
xabs = 0, man, exp, bc
xmag = exp+bc
wp = prec + 20
can_use_asymp = xmag > wp
if not can_use_asymp:
if exp >= 0:
xabsint = man << exp
else:
xabsint = man >> (-exp)
can_use_asymp = xabsint > int(wp*0.693) + 10
if can_use_asymp:
if xmag > wp:
v = fone
else:
v = from_man_exp(ei_asymptotic(to_fixed(x, wp), wp), -wp)
v = mpf_mul(v, mpf_exp(x, wp), wp)
v = mpf_div(v, x, prec, rnd)
else:
wp += 2*int(to_int(xabs))
u = to_fixed(x, wp)
v = ei_taylor(u, wp) + euler_fixed(wp)
t1 = from_man_exp(v,-wp)
t2 = mpf_log(xabs,wp)
v = mpf_add(t1, t2, prec, rnd)
else:
if x == fzero: v = fninf
elif x == finf: v = finf
elif x == fninf: v = fzero
else: v = fnan
if e1:
v = mpf_neg(v)
return v
def mpc_ei(z, prec, rnd=round_fast, e1=False):
if e1:
z = mpc_neg(z)
a, b = z
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if b == fzero:
if e1:
x = mpf_neg(mpf_ei(a, prec, rnd))
if not asign:
y = mpf_neg(mpf_pi(prec, rnd))
else:
y = fzero
return x, y
else:
return mpf_ei(a, prec, rnd), fzero
if a != fzero:
if not aman or not bman:
return (fnan, fnan)
wp = prec + 40
amag = aexp+abc
bmag = bexp+bbc
zmag = max(amag, bmag)
can_use_asymp = zmag > wp
if not can_use_asymp:
zabsint = abs(to_int(a)) + abs(to_int(b))
can_use_asymp = zabsint > int(wp*0.693) + 20
try:
if can_use_asymp:
if zmag > wp:
v = fone, fzero
else:
zre = to_fixed(a, wp)
zim = to_fixed(b, wp)
vre, vim = complex_ei_asymptotic(zre, zim, wp)
v = from_man_exp(vre, -wp), from_man_exp(vim, -wp)
v = mpc_mul(v, mpc_exp(z, wp), wp)
v = mpc_div(v, z, wp)
if e1:
v = mpc_neg(v, prec, rnd)
else:
x, y = v
if bsign:
v = mpf_pos(x, prec, rnd), mpf_sub(y, mpf_pi(wp), prec, rnd)
else:
v = mpf_pos(x, prec, rnd), mpf_add(y, mpf_pi(wp), prec, rnd)
return v
except NoConvergence:
pass
#wp += 2*max(0,zmag)
wp += 2*int(to_int(mpc_abs(z, 5)))
zre = to_fixed(a, wp)
zim = to_fixed(b, wp)
vre, vim = complex_ei_taylor(zre, zim, wp)
vre += euler_fixed(wp)
v = from_man_exp(vre,-wp), from_man_exp(vim,-wp)
if e1:
u = mpc_log(mpc_neg(z),wp)
else:
u = mpc_log(z,wp)
v = mpc_add(v, u, prec, rnd)
if e1:
v = mpc_neg(v)
return v
def mpf_e1(x, prec, rnd=round_fast):
return mpf_ei(x, prec, rnd, True)
def mpc_e1(x, prec, rnd=round_fast):
return mpc_ei(x, prec, rnd, True)
def mpf_expint(n, x, prec, rnd=round_fast, gamma=False):
"""
E_n(x), n an integer, x real
With gamma=True, computes Gamma(n,x) (upper incomplete gamma function)
Returns (real, None) if real, otherwise (real, imag)
The imaginary part is an optional branch cut term
"""
sign, man, exp, bc = x
if not man:
if gamma:
if x == fzero:
# Actually gamma function pole
if n <= 0:
return finf, None
return mpf_gamma_int(n, prec, rnd), None
if x == finf:
return fzero, None
# TODO: could return finite imaginary value at -inf
return fnan, fnan
else:
if x == fzero:
if n > 1:
return from_rational(1, n-1, prec, rnd), None
else:
return finf, None
if x == finf:
return fzero, None
return fnan, fnan
n_orig = n
if gamma:
n = 1-n
wp = prec + 20
xmag = exp + bc
# Beware of near-poles
if xmag < -10:
raise NotImplementedError
nmag = bitcount(abs(n))
have_imag = n > 0 and sign
negx = mpf_neg(x)
# Skip series if direct convergence
if n == 0 or 2*nmag - xmag < -wp:
if gamma:
v = mpf_exp(negx, wp)
re = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), prec, rnd)
else:
v = mpf_exp(negx, wp)
re = mpf_div(v, x, prec, rnd)
else:
# Finite number of terms, or...
can_use_asymptotic_series = -3*wp < n <= 0
# ...large enough?
if not can_use_asymptotic_series:
xi = abs(to_int(x))
m = min(max(1, xi-n), 2*wp)
siz = -n*nmag + (m+n)*bitcount(abs(m+n)) - m*xmag - (144*m//100)
tol = -wp-10
can_use_asymptotic_series = siz < tol
if can_use_asymptotic_series:
r = ((-MPZ_ONE) << (wp+wp)) // to_fixed(x, wp)
m = n
t = r*m
s = MPZ_ONE << wp
while m and t:
s += t
m += 1
t = (m*r*t) >> wp
v = mpf_exp(negx, wp)
if gamma:
# ~ exp(-x) * x^(n-1) * (1 + ...)
v = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), wp)
else:
# ~ exp(-x)/x * (1 + ...)
v = mpf_div(v, x, wp)
re = mpf_mul(v, from_man_exp(s, -wp), prec, rnd)
elif n == 1:
re = mpf_neg(mpf_ei(negx, prec, rnd))
elif n > 0 and n < 3*wp:
T1 = mpf_neg(mpf_ei(negx, wp))
if gamma:
if n_orig & 1:
T1 = mpf_neg(T1)
else:
T1 = mpf_mul(T1, mpf_pow_int(negx, n-1, wp), wp)
r = t = to_fixed(x, wp)
facs = [1] * (n-1)
for k in range(1,n-1):
facs[k] = facs[k-1] * k
facs = facs[::-1]
s = facs[0] << wp
for k in range(1, n-1):
if k & 1:
s -= facs[k] * t
else:
s += facs[k] * t
t = (t*r) >> wp
T2 = from_man_exp(s, -wp, wp)
T2 = mpf_mul(T2, mpf_exp(negx, wp))
if gamma:
T2 = mpf_mul(T2, mpf_pow_int(x, n_orig, wp), wp)
R = mpf_add(T1, T2)
re = mpf_div(R, from_int(ifac(n-1)), prec, rnd)
else:
raise NotImplementedError
if have_imag:
M = from_int(-ifac(n-1))
if gamma:
im = mpf_div(mpf_pi(wp), M, prec, rnd)
else:
im = mpf_div(mpf_mul(mpf_pi(wp), mpf_pow_int(negx, n_orig-1, wp), wp), M, prec, rnd)
return re, im
else:
return re, None
def mpf_ci_si_taylor(x, wp, which=0):
"""
0 - Ci(x) - (euler+log(x))
1 - Si(x)
"""
x = to_fixed(x, wp)
x2 = -(x*x) >> wp
if which == 0:
s, t, k = 0, (MPZ_ONE<<wp), 2
else:
s, t, k = x, x, 3
while t:
t = (t*x2//(k*(k-1)))>>wp
s += t//k
k += 2
return from_man_exp(s, -wp)
def mpc_ci_si_taylor(re, im, wp, which=0):
    # The following code is designed only for small arguments, though not
    # too small: very small arguments would lose relative accuracy
if re[1]:
mag = re[2]+re[3]
elif im[1]:
mag = im[2]+im[3]
if im[1]:
mag = max(mag, im[2]+im[3])
if mag > 2 or mag < -wp:
raise NotImplementedError
wp += (2-mag)
zre = to_fixed(re, wp)
zim = to_fixed(im, wp)
z2re = (zim*zim-zre*zre)>>wp
z2im = (-2*zre*zim)>>wp
tre = zre
tim = zim
one = MPZ_ONE<<wp
if which == 0:
sre, sim, tre, tim, k = 0, 0, (MPZ_ONE<<wp), 0, 2
else:
sre, sim, tre, tim, k = zre, zim, zre, zim, 3
while max(abs(tre), abs(tim)) > 2:
f = k*(k-1)
tre, tim = ((tre*z2re-tim*z2im)//f)>>wp, ((tre*z2im+tim*z2re)//f)>>wp
sre += tre//k
sim += tim//k
k += 2
return from_man_exp(sre, -wp), from_man_exp(sim, -wp)
def mpf_ci_si(x, prec, rnd=round_fast, which=2):
"""
Calculation of Ci(x), Si(x) for real x.
which = 0 -- returns (Ci(x), -)
which = 1 -- returns (Si(x), -)
which = 2 -- returns (Ci(x), Si(x))
Note: if x < 0, Ci(x) needs an additional imaginary term, pi*i.
"""
wp = prec + 20
sign, man, exp, bc = x
ci, si = None, None
if not man:
if x == fzero:
return (fninf, fzero)
if x == fnan:
return (x, x)
ci = fzero
if which != 0:
if x == finf:
si = mpf_shift(mpf_pi(prec, rnd), -1)
if x == fninf:
si = mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
return (ci, si)
# For small x: Ci(x) ~ euler + log(x), Si(x) ~ x
mag = exp+bc
if mag < -wp:
if which != 0:
si = mpf_perturb(x, 1-sign, prec, rnd)
if which != 1:
y = mpf_euler(wp)
xabs = mpf_abs(x)
ci = mpf_add(y, mpf_log(xabs, wp), prec, rnd)
return ci, si
# For huge x: Ci(x) ~ sin(x)/x, Si(x) ~ pi/2
elif mag > wp:
if which != 0:
if sign:
si = mpf_neg(mpf_pi(prec, negative_rnd[rnd]))
else:
si = mpf_pi(prec, rnd)
si = mpf_shift(si, -1)
if which != 1:
ci = mpf_div(mpf_sin(x, wp), x, prec, rnd)
return ci, si
else:
wp += abs(mag)
# Use an asymptotic series? The smallest value of n!/x^n
# occurs for n ~ x, where the magnitude is ~ exp(-x).
asymptotic = mag-1 > math.log(wp, 2)
# Case 1: convergent series near 0
if not asymptotic:
if which != 0:
si = mpf_pos(mpf_ci_si_taylor(x, wp, 1), prec, rnd)
if which != 1:
ci = mpf_ci_si_taylor(x, wp, 0)
ci = mpf_add(ci, mpf_euler(wp), wp)
ci = mpf_add(ci, mpf_log(mpf_abs(x), wp), prec, rnd)
return ci, si
x = mpf_abs(x)
# Case 2: asymptotic series for x >> 1
xf = to_fixed(x, wp)
xr = (MPZ_ONE<<(2*wp)) // xf # 1/x
s1 = (MPZ_ONE << wp)
s2 = xr
t = xr
k = 2
while t:
t = -t
t = (t*xr*k)>>wp
k += 1
s1 += t
t = (t*xr*k)>>wp
k += 1
s2 += t
s1 = from_man_exp(s1, -wp)
s2 = from_man_exp(s2, -wp)
s1 = mpf_div(s1, x, wp)
s2 = mpf_div(s2, x, wp)
cos, sin = mpf_cos_sin(x, wp)
# Ci(x) = sin(x)*s1-cos(x)*s2
# Si(x) = pi/2-cos(x)*s1-sin(x)*s2
if which != 0:
si = mpf_add(mpf_mul(cos, s1), mpf_mul(sin, s2), wp)
si = mpf_sub(mpf_shift(mpf_pi(wp), -1), si, wp)
if sign:
si = mpf_neg(si)
si = mpf_pos(si, prec, rnd)
if which != 1:
ci = mpf_sub(mpf_mul(sin, s1), mpf_mul(cos, s2), prec, rnd)
return ci, si
def mpf_ci(x, prec, rnd=round_fast):
if mpf_sign(x) < 0:
raise ComplexResult
return mpf_ci_si(x, prec, rnd, 0)[0]
def mpf_si(x, prec, rnd=round_fast):
return mpf_ci_si(x, prec, rnd, 1)[1]
def mpc_ci(z, prec, rnd=round_fast):
re, im = z
if im == fzero:
ci = mpf_ci_si(re, prec, rnd, 0)[0]
if mpf_sign(re) < 0:
return (ci, mpf_pi(prec, rnd))
return (ci, fzero)
wp = prec + 20
cre, cim = mpc_ci_si_taylor(re, im, wp, 0)
cre = mpf_add(cre, mpf_euler(wp), wp)
ci = mpc_add((cre, cim), mpc_log(z, wp), prec, rnd)
return ci
def mpc_si(z, prec, rnd=round_fast):
re, im = z
if im == fzero:
return (mpf_ci_si(re, prec, rnd, 1)[1], fzero)
wp = prec + 20
z = mpc_ci_si_taylor(re, im, wp, 1)
return mpc_pos(z, prec, rnd)
#-----------------------------------------------------------------------#
# #
# Bessel functions #
# #
#-----------------------------------------------------------------------#
# A Bessel function of the first kind of integer order, J_n(x), is
# given by the power series
# oo
# ___ k 2 k + n
# \ (-1) / x \
# J_n(x) = ) ----------- | - |
# /___ k! (k + n)! \ 2 /
# k = 0
# Simplifying the quotient between two successive terms gives the
# ratio x^2 / (-4*k*(k+n)). Hence, we only need one full-precision
# multiplication and one division by a small integer per term.
# The complex version is very similar, the only difference being
# that the multiplication is actually 4 multiplies.
# In the general case, we have
# J_v(x) = (x/2)**v / v! * 0F1(v+1, (-1/4)*z**2)
# TODO: for extremely large x, we could use an asymptotic
# trigonometric approximation.
# TODO: recompute at higher precision if the fixed-point mantissa
# is very small
def mpf_besseljn(n, x, prec, rounding=round_fast):
prec += 50
negate = n < 0 and n & 1
mag = x[2]+x[3]
n = abs(n)
wp = prec + 20 + n*bitcount(n)
if mag < 0:
wp -= n * mag
x = to_fixed(x, wp)
x2 = (x**2) >> wp
if not n:
s = t = MPZ_ONE << wp
else:
s = t = (x**n // ifac(n)) >> ((n-1)*wp + n)
k = 1
while t:
t = ((t * x2) // (-4*k*(k+n))) >> wp
s += t
k += 1
if negate:
s = -s
return from_man_exp(s, -wp, prec, rounding)
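# Illustrative sketch (not part of the library): the same series in plain
# floats, using the term ratio -x**2/(4*k*(k+n)) derived in the comment
# above. Hypothetical helper for exposition only.
def _demo_besselj(n, x, terms=40):
    import math
    t = s = (x/2.0)**n / math.factorial(n)
    for k in range(1, terms):
        t *= -x*x / (4.0*k*(k + n))
        s += t
    return s
# _demo_besselj(0, 1.0) ~= 0.7651976865579666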
def mpc_besseljn(n, z, prec, rounding=round_fast):
negate = n < 0 and n & 1
n = abs(n)
origprec = prec
zre, zim = z
mag = max(zre[2]+zre[3], zim[2]+zim[3])
prec += 20 + n*bitcount(n) + abs(mag)
if mag < 0:
prec -= n * mag
zre = to_fixed(zre, prec)
zim = to_fixed(zim, prec)
z2re = (zre**2 - zim**2) >> prec
z2im = (zre*zim) >> (prec-1)
if not n:
sre = tre = MPZ_ONE << prec
sim = tim = MPZ_ZERO
else:
re, im = complex_int_pow(zre, zim, n)
sre = tre = (re // ifac(n)) >> ((n-1)*prec + n)
sim = tim = (im // ifac(n)) >> ((n-1)*prec + n)
k = 1
while abs(tre) + abs(tim) > 3:
p = -4*k*(k+n)
tre, tim = tre*z2re - tim*z2im, tim*z2re + tre*z2im
tre = (tre // p) >> prec
tim = (tim // p) >> prec
sre += tre
sim += tim
k += 1
if negate:
sre = -sre
sim = -sim
re = from_man_exp(sre, -prec, origprec, rounding)
im = from_man_exp(sim, -prec, origprec, rounding)
return (re, im)
def mpf_agm(a, b, prec, rnd=round_fast):
"""
Computes the arithmetic-geometric mean agm(a,b) for
nonnegative mpf values a, b.
"""
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if asign or bsign:
raise ComplexResult("agm of a negative number")
# Handle inf, nan or zero in either operand
if not (aman and bman):
if a == fnan or b == fnan:
return fnan
if a == finf:
if b == fzero:
return fnan
return finf
if b == finf:
if a == fzero:
return fnan
return finf
# agm(0,x) = agm(x,0) = 0
return fzero
wp = prec + 20
amag = aexp+abc
bmag = bexp+bbc
mag_delta = amag - bmag
# Reduce to roughly the same magnitude using floating-point AGM
abs_mag_delta = abs(mag_delta)
if abs_mag_delta > 10:
while abs_mag_delta > 10:
a, b = mpf_shift(mpf_add(a,b,wp),-1), \
mpf_sqrt(mpf_mul(a,b,wp),wp)
abs_mag_delta //= 2
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
amag = aexp+abc
bmag = bexp+bbc
mag_delta = amag - bmag
#print to_float(a), to_float(b)
# Use agm(a,b) = agm(x*a,x*b)/x to obtain a, b ~= 1
min_mag = min(amag,bmag)
max_mag = max(amag,bmag)
n = 0
# If too small, we lose precision when going to fixed-point
if min_mag < -8:
n = -min_mag
# If too large, we waste time using fixed-point with large numbers
elif max_mag > 20:
n = -max_mag
if n:
a = mpf_shift(a, n)
b = mpf_shift(b, n)
#print to_float(a), to_float(b)
af = to_fixed(a, wp)
bf = to_fixed(b, wp)
g = agm_fixed(af, bf, wp)
return from_man_exp(g, -wp-n, prec, rnd)
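# Illustrative sketch (not part of the library): the AGM iteration in plain
# floats; each step roughly doubles the number of correct digits, which is
# why only O(log(prec)) iterations are ever needed. Hypothetical helper for
# exposition only.
def _demo_agm(a, b, tol=1e-15):
    import math
    while abs(a - b) > tol*a:
        a, b = (a + b)/2, math.sqrt(a*b)
    return a
# _demo_agm(1.0, 2.0) ~= 1.4567910310469068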
def mpf_agm1(a, prec, rnd=round_fast):
"""
Computes the arithmetic-geometric mean agm(1,a) for a nonnegative
mpf value a.
"""
return mpf_agm(fone, a, prec, rnd)
def mpc_agm(a, b, prec, rnd=round_fast):
"""
Complex AGM.
TODO:
* check that convergence works as intended
* optimize
* select a nonarbitrary branch
"""
if mpc_is_infnan(a) or mpc_is_infnan(b):
return fnan, fnan
if mpc_zero in (a, b):
return fzero, fzero
if mpc_neg(a) == b:
return fzero, fzero
wp = prec+20
eps = mpf_shift(fone, -wp+10)
while 1:
a1 = mpc_shift(mpc_add(a, b, wp), -1)
b1 = mpc_sqrt(mpc_mul(a, b, wp), wp)
a, b = a1, b1
size = mpf_min_max([mpc_abs(a,10), mpc_abs(b,10)])[1]
err = mpc_abs(mpc_sub(a, b, 10), 10)
if size == fzero or mpf_lt(err, mpf_mul(eps, size)):
return a
def mpc_agm1(a, prec, rnd=round_fast):
return mpc_agm(mpc_one, a, prec, rnd)
def mpf_ellipk(x, prec, rnd=round_fast):
if not x[1]:
if x == fzero:
return mpf_shift(mpf_pi(prec, rnd), -1)
if x == fninf:
return fzero
if x == fnan:
return x
if x == fone:
return finf
    # TODO: for |x| << 1/2, one could fall back to
    # pi/2 * hyp2f1_rat((1,2),(1,2),(1,1), x)
wp = prec + 15
# Use K(x) = pi/2/agm(1,a) where a = sqrt(1-x)
# The sqrt raises ComplexResult if x > 0
a = mpf_sqrt(mpf_sub(fone, x, wp), wp)
v = mpf_agm1(a, wp)
r = mpf_div(mpf_pi(wp), v, prec, rnd)
return mpf_shift(r, -1)
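# Illustrative check (not part of the library), reusing _demo_agm above: the
# identity K(m) = pi/(2*agm(1, sqrt(1-m))) used by mpf_ellipk gives
# K(0) = pi/2 and K(0.5) ~= 1.8540746773013719 in plain floats.
def _demo_ellipk(m):
    import math
    return math.pi / (2*_demo_agm(1.0, math.sqrt(1.0 - m)))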
def mpc_ellipk(z, prec, rnd=round_fast):
re, im = z
if im == fzero:
if re == finf:
return mpc_zero
if mpf_le(re, fone):
return mpf_ellipk(re, prec, rnd), fzero
wp = prec + 15
a = mpc_sqrt(mpc_sub(mpc_one, z, wp), wp)
v = mpc_agm1(a, wp)
r = mpc_mpf_div(mpf_pi(wp), v, prec, rnd)
return mpc_shift(r, -1)
def mpf_ellipe(x, prec, rnd=round_fast):
# http://functions.wolfram.com/EllipticIntegrals/
# EllipticK/20/01/0001/
# E = (1-m)*(K'(m)*2*m + K(m))
sign, man, exp, bc = x
if not man:
if x == fzero:
return mpf_shift(mpf_pi(prec, rnd), -1)
if x == fninf:
return finf
if x == fnan:
return x
if x == finf:
raise ComplexResult
if x == fone:
return fone
wp = prec+20
mag = exp+bc
if mag < -wp:
return mpf_shift(mpf_pi(prec, rnd), -1)
# Compute a finite difference for K'
p = max(mag, 0) - wp
h = mpf_shift(fone, p)
K = mpf_ellipk(x, 2*wp)
Kh = mpf_ellipk(mpf_sub(x, h), 2*wp)
Kdiff = mpf_shift(mpf_sub(K, Kh), -p)
t = mpf_sub(fone, x)
b = mpf_mul(Kdiff, mpf_shift(x,1), wp)
return mpf_mul(t, mpf_add(K, b), prec, rnd)
def mpc_ellipe(z, prec, rnd=round_fast):
re, im = z
if im == fzero:
if re == finf:
return (fzero, finf)
if mpf_le(re, fone):
return mpf_ellipe(re, prec, rnd), fzero
wp = prec + 15
mag = mpc_abs(z, 1)
p = max(mag[2]+mag[3], 0) - wp
h = mpf_shift(fone, p)
K = mpc_ellipk(z, 2*wp)
Kh = mpc_ellipk(mpc_add_mpf(z, h, 2*wp), 2*wp)
Kdiff = mpc_shift(mpc_sub(Kh, K, wp), -p)
t = mpc_sub(mpc_one, z, wp)
b = mpc_mul(Kdiff, mpc_shift(z,1), wp)
return mpc_mul(t, mpc_add(K, b, wp), prec, rnd)
#-------------------------------------------------------------------------------
# End of file: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/libhyper.py
# Begin file: __init__.py
#-------------------------------------------------------------------------------
from .libmpf import (prec_to_dps, dps_to_prec, repr_dps,
round_down, round_up, round_floor, round_ceiling, round_nearest,
to_pickable, from_pickable, ComplexResult,
fzero, fnzero, fone, fnone, ftwo, ften, fhalf, fnan, finf, fninf,
math_float_inf, round_int, normalize, normalize1,
from_man_exp, from_int, to_man_exp, to_int, mpf_ceil, mpf_floor,
mpf_nint, mpf_frac,
from_float, to_float, from_rational, to_rational, to_fixed,
mpf_rand, mpf_eq, mpf_hash, mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_ge,
mpf_pos, mpf_neg, mpf_abs, mpf_sign, mpf_add, mpf_sub, mpf_sum,
mpf_mul, mpf_mul_int, mpf_shift, mpf_frexp,
mpf_div, mpf_rdiv_int, mpf_mod, mpf_pow_int,
mpf_perturb,
to_digits_exp, to_str, str_to_man_exp, from_str, from_bstr, to_bstr,
mpf_sqrt, mpf_hypot)
from .libmpc import (mpc_one, mpc_zero, mpc_two, mpc_half,
mpc_is_inf, mpc_is_infnan, mpc_to_str, mpc_to_complex, mpc_hash,
mpc_conjugate, mpc_is_nonzero, mpc_add, mpc_add_mpf,
mpc_sub, mpc_sub_mpf, mpc_pos, mpc_neg, mpc_shift, mpc_abs,
mpc_arg, mpc_floor, mpc_ceil, mpc_nint, mpc_frac, mpc_mul, mpc_square,
mpc_mul_mpf, mpc_mul_imag_mpf, mpc_mul_int,
mpc_div, mpc_div_mpf, mpc_reciprocal, mpc_mpf_div,
complex_int_pow, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_sqrt, mpc_nthroot, mpc_cbrt, mpc_exp, mpc_log, mpc_cos, mpc_sin,
mpc_tan, mpc_cos_pi, mpc_sin_pi, mpc_cosh, mpc_sinh, mpc_tanh,
mpc_atan, mpc_acos, mpc_asin, mpc_asinh, mpc_acosh, mpc_atanh,
mpc_fibonacci, mpf_expj, mpf_expjpi, mpc_expj, mpc_expjpi,
mpc_cos_sin, mpc_cos_sin_pi)
from .libelefun import (ln2_fixed, mpf_ln2, ln10_fixed, mpf_ln10,
pi_fixed, mpf_pi, e_fixed, mpf_e, phi_fixed, mpf_phi,
degree_fixed, mpf_degree,
mpf_pow, mpf_nthroot, mpf_cbrt, log_int_fixed, agm_fixed,
mpf_log, mpf_log_hypot, mpf_exp, mpf_cos_sin, mpf_cos, mpf_sin, mpf_tan,
mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, mpf_cosh_sinh,
mpf_cosh, mpf_sinh, mpf_tanh, mpf_atan, mpf_atan2, mpf_asin,
mpf_acos, mpf_asinh, mpf_acosh, mpf_atanh, mpf_fibonacci)
from .libhyper import (NoConvergence, make_hyp_summator,
mpf_erf, mpf_erfc, mpf_ei, mpc_ei, mpf_e1, mpc_e1, mpf_expint,
mpf_ci_si, mpf_ci, mpf_si, mpc_ci, mpc_si, mpf_besseljn,
mpc_besseljn, mpf_agm, mpf_agm1, mpc_agm, mpc_agm1,
mpf_ellipk, mpc_ellipk, mpf_ellipe, mpc_ellipe)
from .gammazeta import (catalan_fixed, mpf_catalan,
khinchin_fixed, mpf_khinchin, glaisher_fixed, mpf_glaisher,
apery_fixed, mpf_apery, euler_fixed, mpf_euler, mertens_fixed,
mpf_mertens, twinprime_fixed, mpf_twinprime,
mpf_bernoulli, bernfrac, mpf_gamma_int,
mpf_factorial, mpc_factorial, mpf_gamma, mpc_gamma,
mpf_loggamma, mpc_loggamma, mpf_rgamma, mpc_rgamma,
mpf_gamma_old, mpc_gamma_old, mpf_factorial_old, mpc_factorial_old,
mpf_harmonic, mpc_harmonic, mpf_psi0, mpc_psi0,
mpf_psi, mpc_psi, mpf_zeta_int, mpf_zeta, mpc_zeta,
mpf_altzeta, mpc_altzeta, mpf_zetasum, mpc_zetasum)
from .libmpi import (mpi_str,
mpi_from_str, mpi_to_str,
mpi_eq, mpi_ne,
mpi_lt, mpi_le, mpi_gt, mpi_ge,
mpi_add, mpi_sub, mpi_delta, mpi_mid,
mpi_pos, mpi_neg, mpi_abs, mpi_mul, mpi_div, mpi_exp,
mpi_log, mpi_sqrt, mpi_pow_int, mpi_pow, mpi_cos_sin,
mpi_cos, mpi_sin, mpi_tan, mpi_cot,
mpi_atan, mpi_atan2,
mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
mpci_abs, mpci_pow, mpci_exp, mpci_log, mpci_cos, mpci_sin,
mpi_gamma, mpci_gamma, mpi_loggamma, mpci_loggamma,
mpi_rgamma, mpci_rgamma, mpi_factorial, mpci_factorial)
from .libintmath import (trailing, bitcount, numeral, bin_to_radix,
isqrt, isqrt_small, isqrt_fast, sqrt_fixed, sqrtrem, ifib, ifac,
list_primes, isprime, moebius, gcd, eulernum)
from .backend import (gmpy, sage, BACKEND, STRICT, MPZ, MPZ_TYPE,
MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_THREE, MPZ_FIVE, int_types,
HASH_MODULUS, HASH_BITS)
#-------------------------------------------------------------------------------
# End of file: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/libmp/__init__.py
# Begin file: libmpf.py
#-------------------------------------------------------------------------------
__docformat__ = 'plaintext'
import math
from bisect import bisect
import sys
# Importing random is slow
#from random import getrandbits
getrandbits = None
from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils)
from .libintmath import (giant_steps,
trailtable, bctable, lshift, rshift, bitcount, trailing,
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
bin_to_radix)
# We don't pickle tuples directly for the following reasons:
# 1: pickle uses str() for ints, which is inefficient when they are large
# 2: pickle doesn't work for gmpy mpzs
# Both problems are solved by using hex()
if BACKEND == 'sage':
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man), exp, bc
else:
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man)[2:], exp, bc
def from_pickable(x):
sign, man, exp, bc = x
return (sign, MPZ(man, 16), exp, bc)
class ComplexResult(ValueError):
pass
try:
intern
except NameError:
intern = lambda x: x
# All supported rounding modes
round_nearest = intern('n')
round_floor = intern('f')
round_ceiling = intern('c')
round_up = intern('u')
round_down = intern('d')
round_fast = round_down
def prec_to_dps(n):
"""Return number of accurate decimals that can be represented
with a precision of n bits."""
return max(1, int(round(int(n)/3.3219280948873626)-1))
def dps_to_prec(n):
"""Return the number of bits required to represent n decimals
accurately."""
return max(1, int(round((int(n)+1)*3.3219280948873626)))
def repr_dps(n):
"""Return the number of decimal digits required to represent
a number with n-bit precision so that it can be uniquely
reconstructed from the representation."""
dps = prec_to_dps(n)
if dps == 15:
return 17
return dps + 3
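# Worked example: 3.3219280948873626 is log(10, 2). For prec = 53 bits (a
# Python float), prec_to_dps(53) = round(53/3.32193) - 1 = 15 accurate
# decimal digits, while repr_dps(53) = 17, the well-known digit count needed
# to reconstruct a float exactly from its decimal representation.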
#----------------------------------------------------------------------------#
# Some commonly needed float values #
#----------------------------------------------------------------------------#
# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MPZ_ZERO, 0, 0)
fnzero = (1, MPZ_ZERO, 0, 0)
fone = (0, MPZ_ONE, 0, 1)
fnone = (1, MPZ_ONE, 0, 1)
ftwo = (0, MPZ_ONE, 1, 1)
ften = (0, MPZ_FIVE, 1, 3)
fhalf = (0, MPZ_ONE, -1, 1)
# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent
fnan = (0, MPZ_ZERO, -123, -1)
finf = (0, MPZ_ZERO, -456, -2)
fninf = (1, MPZ_ZERO, -789, -3)
# Was 1e1000; this is broken in Python 2.4
math_float_inf = 1e300 * 1e300
#----------------------------------------------------------------------------#
# Rounding #
#----------------------------------------------------------------------------#
# This function can be used to round a mantissa generally. However,
# we will try to do most rounding inline for efficiency.
def round_int(x, n, rnd):
if rnd == round_nearest:
if x >= 0:
t = x >> (n-1)
if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
return (t>>1)+1
else:
return t>>1
else:
return -round_int(-x, n, rnd)
if rnd == round_floor:
return x >> n
if rnd == round_ceiling:
return -((-x) >> n)
if rnd == round_down:
if x >= 0:
return x >> n
return -((-x) >> n)
if rnd == round_up:
if x >= 0:
return -((-x) >> n)
return x >> n
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
def __getitem__(self, n):
return (MPZ_ONE<<(n-1))-1
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
h_mask = [h_mask_big(), h_mask_small]
# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
round_down:(1,1), round_up:(0,0)}
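# Worked example: round_int(x, n, rnd) rounds x / 2**n to an integer. With
# x = 22, n = 2 (exact value 5.5): round_floor and round_down give 5,
# round_ceiling and round_up give 6, and round_nearest gives 6, since ties
# go to the even candidate (so round_int(18, 2, round_nearest) = 4, not 5).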
#----------------------------------------------------------------------------#
# Normalization of raw mpfs #
#----------------------------------------------------------------------------#
# This function is called almost every time an mpf is created.
# It has been optimized accordingly.
def _normalize(sign, man, exp, bc, prec, rnd):
"""
Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
normalized mantissa. The mantissa is rounded in the specified
direction if its size exceeds the precision. Trailing zero bits
are also stripped from the mantissa to ensure that the
representation is canonical.
Conditions on the input:
* The input must represent a regular (finite) number
* The sign bit must be 0 or 1
* The mantissa must be positive
* The exponent must be an integer
* The bitcount must be exact
If these conditions are not met, use from_man_exp, mpf_pos, or any
of the conversion functions to create normalized raw mpf tuples.
"""
if not man:
return fzero
# Cut mantissa down to size if larger than target precision
n = bc - prec
if n > 0:
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
def _normalize1(sign, man, exp, bc, prec, rnd):
"""same as normalize, but with the added condition that
man is odd or zero
"""
if not man:
return fzero
if bc <= prec:
return sign, man, exp, bc
n = bc - prec
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
try:
_exp_types = (int, long)
except NameError:
_exp_types = (int,)
def strict_normalize(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
return _normalize(sign, man, exp, bc, prec, rnd)
def strict_normalize1(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
assert (not man) or (man & 1)
return _normalize1(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
_normalize = gmpy._mpmath_normalize
_normalize1 = gmpy._mpmath_normalize
if BACKEND == 'sage':
_normalize = _normalize1 = sage_utils.normalize
if STRICT:
normalize = strict_normalize
normalize1 = strict_normalize1
else:
normalize = _normalize
normalize1 = _normalize1
#----------------------------------------------------------------------------#
# Conversion functions #
#----------------------------------------------------------------------------#
def from_man_exp(man, exp, prec=None, rnd=round_fast):
"""Create raw mpf from (man, exp) pair. The mantissa may be signed.
If no precision is specified, the mantissa is stored exactly."""
man = MPZ(man)
sign = 0
if man < 0:
sign = 1
man = -man
if man < 1024:
bc = bctable[int(man)]
else:
bc = bitcount(man)
if not prec:
if not man:
return fzero
if not man & 1:
if man & 2:
return (sign, man >> 1, exp + 1, bc - 1)
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
return (sign, man, exp, bc)
return normalize(sign, man, exp, bc, prec, rnd)
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
from_man_exp = gmpy._mpmath_create
if BACKEND == 'sage':
from_man_exp = sage_utils.from_man_exp
def from_int(n, prec=0, rnd=round_fast):
"""Create a raw mpf from an integer. If no precision is specified,
the mantissa is stored exactly."""
if not prec:
if n in int_cache:
return int_cache[n]
return from_man_exp(n, 0, prec, rnd)
def to_man_exp(s):
"""Return (man, exp) of a raw mpf. Raise an error if inf/nan."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("mantissa and exponent are undefined for %s" % man)
return man, exp
def to_int(s, rnd=None):
"""Convert a raw mpf to the nearest int. Rounding is done down by
default (same as int(float) in Python), but can be changed. If the
input is inf/nan, an exception is raised."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("cannot convert %s to int" % man)
if exp >= 0:
if sign:
return (-man) << exp
return man << exp
# Make default rounding fast
if not rnd:
if sign:
return -(man >> (-exp))
else:
return man >> (-exp)
if sign:
return round_int(-man, -exp, rnd)
else:
return round_int(man, -exp, rnd)
def mpf_round_int(s, rnd):
sign, man, exp, bc = s
if (not man) and exp:
return s
if exp >= 0:
return s
mag = exp+bc
if mag < 1:
if rnd == round_ceiling:
if sign: return fzero
else: return fone
elif rnd == round_floor:
if sign: return fnone
else: return fzero
elif rnd == round_nearest:
if mag < 0 or man == MPZ_ONE: return fzero
elif sign: return fnone
else: return fone
else:
raise NotImplementedError
return mpf_pos(s, min(bc, mag), rnd)
def mpf_floor(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_floor)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_ceil(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_ceiling)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_nint(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_nearest)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_frac(s, prec=0, rnd=round_fast):
return mpf_sub(s, mpf_floor(s), prec, rnd)
def from_float(x, prec=53, rnd=round_fast):
"""Create a raw mpf from a Python float, rounding if necessary.
If prec >= 53, the result is guaranteed to represent exactly the
same number as the input. If prec is not specified, use prec=53."""
# frexp only raises an exception for nan on some platforms
if x != x:
return fnan
# in Python2.5 math.frexp gives an exception for float infinity
# in Python2.6 it returns (float infinity, 0)
try:
m, e = math.frexp(x)
except:
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return fnan
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
def to_float(s, strict=False):
"""
Convert a raw mpf to a Python float. The result is exact if the
bitcount of s is <= 53 and no underflow/overflow occurs.
If the number is too large or too small to represent as a regular
float, it will be converted to inf or 0.0. Setting strict=True
forces an OverflowError to be raised instead.
"""
sign, man, exp, bc = s
if not man:
if s == fzero: return 0.0
if s == finf: return math_float_inf
if s == fninf: return -math_float_inf
return math_float_inf/math_float_inf
if sign:
man = -man
try:
if bc < 100:
return math.ldexp(man, exp)
# Try resizing the mantissa. Overflow may still happen here.
n = bc - 53
m = man >> n
return math.ldexp(m, exp + n)
except OverflowError:
if strict:
raise
# Overflow to infinity
if exp + bc > 0:
if sign:
return -math_float_inf
else:
return math_float_inf
# Underflow to zero
return 0.0
def from_rational(p, q, prec, rnd=round_fast):
"""Create a raw mpf from a rational number p/q, round if
necessary."""
return mpf_div(from_int(p), from_int(q), prec, rnd)
def to_rational(s):
"""Convert a raw mpf to a rational number. Return integers (p, q)
such that s = p/q exactly."""
sign, man, exp, bc = s
if sign:
man = -man
if bc == -1:
raise ValueError("cannot convert %s to a rational number" % man)
if exp >= 0:
return man * (1<<exp), 1
else:
return man, 1<<(-exp)
def to_fixed(s, prec):
"""Convert a raw mpf to a fixed-point big integer"""
sign, man, exp, bc = s
offset = exp + prec
if sign:
if offset >= 0: return (-man) << offset
else: return (-man) >> (-offset)
else:
if offset >= 0: return man << offset
else: return man >> (-offset)
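# Worked example: a fixed-point integer n at precision prec represents the
# real number n / 2**prec. Thus to_fixed(fhalf, 8) = 128 (0.5 * 2**8), and
# from_man_exp(128, -8) recovers the raw mpf for 0.5.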
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Arithmetic operations, etc. #
#----------------------------------------------------------------------------#
def mpf_rand(prec):
"""Return a raw mpf chosen randomly from [0, 1), with prec bits
in the mantissa."""
global getrandbits
if not getrandbits:
import random
getrandbits = random.getrandbits
return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
def mpf_eq(s, t):
"""Test equality of two raw mpfs. This is simply tuple comparion
unless either number is nan, in which case the result is False."""
if not s[1] or not t[1]:
if s == fnan or t == fnan:
return False
return s == t
def mpf_hash(s):
    # Duplicate the new hash algorithm introduced in Python 3.2.
    if sys.version_info >= (3, 2):
ssign, sman, sexp, sbc = s
# Handle special numbers
if not sman:
if s == fnan: return sys.hash_info.nan
if s == finf: return sys.hash_info.inf
if s == fninf: return -sys.hash_info.inf
h = sman % HASH_MODULUS
if sexp >= 0:
sexp = sexp % HASH_BITS
else:
sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS)
h = (h << sexp) % HASH_MODULUS
if ssign: h = -h
        if h == -1: h = -2  # -1 is reserved as an error value for hashes
return int(h)
else:
try:
# Try to be compatible with hash values for floats and ints
return hash(to_float(s, strict=1))
except OverflowError:
# We must unfortunately sacrifice compatibility with ints here.
# We could do hash(man << exp) when the exponent is positive, but
# this would cause unreasonable inefficiency for large numbers.
return hash(s)
def mpf_cmp(s, t):
"""Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
and 1 if s > t. (Same convention as Python's cmp() function.)"""
# In principle, a comparison amounts to determining the sign of s-t.
# A full subtraction is relatively slow, however, so we first try to
# look at the components.
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
# Handle zeros and special numbers
if not sman or not tman:
if s == fzero: return -mpf_sign(t)
if t == fzero: return mpf_sign(s)
if s == t: return 0
# Follow same convention as Python's cmp for float nan
if t == fnan: return 1
if s == finf: return 1
if t == fninf: return 1
return -1
# Different sides of zero
if ssign != tsign:
if not ssign: return 1
return -1
# This reduces to direct integer comparison
if sexp == texp:
if sman == tman:
return 0
if sman > tman:
if ssign: return -1
else: return 1
else:
if ssign: return 1
else: return -1
# Check position of the highest set bit in each number. If
# different, there is certainly an inequality.
a = sbc + sexp
b = tbc + texp
if ssign:
if a < b: return 1
if a > b: return -1
else:
if a < b: return -1
if a > b: return 1
# Both numbers have the same highest bit. Subtract to find
# how the lower bits compare.
delta = mpf_sub(s, t, 5, round_floor)
if delta[0]:
return -1
return 1
def mpf_lt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) < 0
def mpf_le(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) <= 0
def mpf_gt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) > 0
def mpf_ge(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) >= 0
def mpf_min_max(seq):
min = max = seq[0]
for x in seq[1:]:
if mpf_lt(x, min): min = x
if mpf_gt(x, max): max = x
return min, max
def mpf_pos(s, prec=0, rnd=round_fast):
"""Calculate 0+s for a raw mpf (i.e., just round s to the specified
precision)."""
if prec:
sign, man, exp, bc = s
if (not man) and exp:
return s
return normalize1(sign, man, exp, bc, prec, rnd)
return s
def mpf_neg(s, prec=None, rnd=round_fast):
"""Negate a raw mpf (return -s), rounding the result to the
specified precision. The prec argument can be omitted to do the
operation exactly."""
sign, man, exp, bc = s
if not man:
if exp:
if s == finf: return fninf
if s == fninf: return finf
return s
if not prec:
return (1-sign, man, exp, bc)
return normalize1(1-sign, man, exp, bc, prec, rnd)
def mpf_abs(s, prec=None, rnd=round_fast):
"""Return abs(s) of the raw mpf s, rounded to the specified
precision. The prec argument can be omitted to generate an
exact result."""
sign, man, exp, bc = s
if (not man) and exp:
if s == fninf:
return finf
return s
if not prec:
if sign:
return (0, man, exp, bc)
return s
return normalize1(0, man, exp, bc, prec, rnd)
def mpf_sign(s):
"""Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
sign, man, exp, bc = s
if not man:
if s == finf: return 1
if s == fninf: return -1
return 0
return (-1) ** sign
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
"""
Add the two raw mpf values s and t.
With prec=0, no rounding is performed. Note that this can
produce a very large mantissa (potentially too large to fit
in memory) if exponents are far apart.
"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
tsign ^= _sub
# Standard case: two nonzero, regular numbers
if sman and tman:
offset = sexp - texp
if offset:
if offset > 0:
# Outside precision range; only need to perturb
if offset > 100 and prec:
delta = sbc + sexp - tbc - texp
if delta > prec + 4:
offset = prec + 4
sman <<= offset
if tsign == ssign: sman += 1
else: sman -= 1
return normalize1(ssign, sman, sexp-offset,
bitcount(sman), prec, rnd)
# Add
if ssign == tsign:
man = tman + (sman << offset)
# Subtract
else:
if ssign: man = tman - (sman << offset)
else: man = (sman << offset) - tman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize1(ssign, man, texp, bc, prec or bc, rnd)
elif offset < 0:
# Outside precision range; only need to perturb
if offset < -100 and prec:
delta = tbc + texp - sbc - sexp
if delta > prec + 4:
offset = prec + 4
tman <<= offset
if ssign == tsign: tman += 1
else: tman -= 1
return normalize1(tsign, tman, texp-offset,
bitcount(tman), prec, rnd)
# Add
if ssign == tsign:
man = sman + (tman << -offset)
# Subtract
else:
if tsign: man = sman - (tman << -offset)
else: man = (tman << -offset) - sman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
# Equal exponents; no shifting necessary
if ssign == tsign:
man = tman + sman
else:
if ssign: man = tman - sman
else: man = sman - tman
if man >= 0:
ssign = 0
else:
man = -man
ssign = 1
bc = bitcount(man)
return normalize(ssign, man, texp, bc, prec or bc, rnd)
# Handle zeros and special numbers
if _sub:
t = mpf_neg(t)
if not sman:
if sexp:
if s == t or tman or not texp:
return s
return fnan
if tman:
return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
return t
if texp:
return t
if sman:
return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
return s
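# Hedged sketch of the exact (prec=0) path versus rounding: with nearby
# exponents the sum is formed exactly and may carry a long mantissa, while
# a rounded addition absorbs the distant term into the last few bits:
#
#     >>> tiny = (0, MPZ(1), -60, 1)            # 2**-60 as a raw mpf
#     >>> exact = mpf_add(fone, tiny)           # exact: 61-bit mantissa
#     >>> exact == mpf_add(fone, tiny, 53, round_nearest)
#     False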
def mpf_sub(s, t, prec=0, rnd=round_fast):
"""Return the difference of two raw mpfs, s-t. This function is
simply a wrapper of mpf_add that changes the sign of t."""
return mpf_add(s, t, prec, rnd, 1)
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
"""
Sum a list of mpf values efficiently and accurately
(typically no temporary roundoff occurs). If prec=0,
the final result will not be rounded either.
There may be roundoff error or cancellation if extremely
large exponent differences occur.
With absolute=True, sums the absolute values.
"""
man = 0
exp = 0
max_extra_prec = prec*2 or 1000000 # XXX
special = None
for x in xs:
xsign, xman, xexp, xbc = x
if xman:
if xsign and not absolute:
xman = -xman
delta = xexp - exp
if xexp >= exp:
# x much larger than existing sum?
# first: quick test
if (delta > max_extra_prec) and \
((not man) or delta-bitcount(abs(man)) > max_extra_prec):
man = xman
exp = xexp
else:
man += (xman << delta)
else:
delta = -delta
# x much smaller than existing sum?
if delta-xbc > max_extra_prec:
if not man:
man, exp = xman, xexp
else:
man = (man << delta) + xman
exp = xexp
elif xexp:
if absolute:
x = mpf_abs(x)
special = mpf_add(special or fzero, x, 1)
# Will be inf or nan
if special:
return special
return from_man_exp(man, exp, prec, rnd)
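# Hedged sketch of why mpf_sum beats sequential rounding: the terms below
# cancel exactly under mpf_sum, while pairwise 53-bit addition loses the
# tiny term entirely:
#
#     >>> tiny = (0, MPZ(1), -60, 1)            # 2**-60 as a raw mpf
#     >>> mpf_sum([fone, tiny, mpf_neg(fone)], 53) == tiny
#     True
#     >>> mpf_sub(mpf_add(fone, tiny, 53), fone, 53) == fzero
#     True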
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
"""Multiply two raw mpfs"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
sign = ssign ^ tsign
man = sman*tman
if man:
bc = bitcount(man)
if prec:
return normalize1(sign, man, sexp+texp, bc, prec, rnd)
else:
return (sign, man, sexp+texp, bc)
s_special = (not sman) and sexp
t_special = (not tman) and texp
if not s_special and not t_special:
return fzero
if fnan in (s, t): return fnan
if (not tman) and texp: s, t = t, s
if t == fzero: return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
"""Multiply by a Python integer."""
sign, man, exp, bc = s
if not man:
return mpf_mul(s, from_int(n), prec, rnd)
if not n:
return fzero
if n < 0:
sign ^= 1
n = -n
man *= n
return normalize(sign, man, exp, bitcount(man), prec, rnd)
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
"""Multiply two raw mpfs"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
sign = ssign ^ tsign
man = sman*tman
if man:
bc = sbc + tbc - 1
bc += int(man>>bc)
if prec:
return normalize1(sign, man, sexp+texp, bc, prec, rnd)
else:
return (sign, man, sexp+texp, bc)
s_special = (not sman) and sexp
t_special = (not tman) and texp
if not s_special and not t_special:
return fzero
if fnan in (s, t): return fnan
if (not tman) and texp: s, t = t, s
if t == fzero: return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
"""Multiply by a Python integer."""
sign, man, exp, bc = s
if not man:
return mpf_mul(s, from_int(n), prec, rnd)
if not n:
return fzero
if n < 0:
sign ^= 1
n = -n
man *= n
# Generally n will be small
if n < 1024:
bc += bctable[int(n)] - 1
else:
bc += bitcount(n) - 1
bc += int(man>>bc)
return normalize(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy':
mpf_mul = gmpy_mpf_mul
mpf_mul_int = gmpy_mpf_mul_int
else:
mpf_mul = python_mpf_mul
mpf_mul_int = python_mpf_mul_int
def mpf_shift(s, n):
"""Quickly multiply the raw mpf s by 2**n without rounding."""
sign, man, exp, bc = s
if not man:
return s
return sign, man, exp+n, bc
def mpf_frexp(x):
"""Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero"""
sign, man, exp, bc = x
if not man:
if x == fzero:
return (fzero, 0)
else:
raise ValueError
return mpf_shift(x, -bc-exp), bc+exp
def mpf_div(s, t, prec, rnd=round_fast):
"""Floating-point division"""
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if not sman or not tman:
if s == fzero:
if t == fzero: raise ZeroDivisionError
if t == fnan: return fnan
return fzero
if t == fzero:
raise ZeroDivisionError
s_special = (not sman) and sexp
t_special = (not tman) and texp
if s_special and t_special:
return fnan
if s == fnan or t == fnan:
return fnan
if not t_special:
if t == fzero:
return fnan
return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
return fzero
sign = ssign ^ tsign
if tman == 1:
return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
# Same strategy as for addition: if there is a remainder, perturb
# the result a few bits outside the precision range before rounding
extra = prec - sbc + tbc + 5
if extra < 5:
extra = 5
quot, rem = divmod(sman<<extra, tman)
if rem:
quot = (quot<<1) + 1
extra += 1
return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
"""Floating-point division n/t with a Python integer as numerator"""
sign, man, exp, bc = t
if not n or not man:
return mpf_div(from_int(n), t, prec, rnd)
if n < 0:
sign ^= 1
n = -n
extra = prec + bc + 5
quot, rem = divmod(n<<extra, man)
if rem:
quot = (quot<<1) + 1
extra += 1
return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
def mpf_mod(s, t, prec, rnd=round_fast):
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
if ((not sman) and sexp) or ((not tman) and texp):
return fnan
# Important special case: do nothing if t is larger
if ssign == tsign and texp > sexp+sbc:
return s
# Another important special case: this allows us to do e.g. x % 1.0
# to find the fractional part of x, and it will work when x is huge.
if tman == 1 and sexp > texp+tbc:
return fzero
base = min(sexp, texp)
sman = (-1)**ssign * sman
tman = (-1)**tsign * tman
man = (sman << (sexp-base)) % (tman << (texp-base))
if man >= 0:
sign = 0
else:
man = -man
sign = 1
return normalize(sign, man, base, bitcount(man), prec, rnd)
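# Hedged sketch of the x % 1.0 use case mentioned above: taking the
# fractional part of 3.25 == 13 * 2**-2 (raw tuple layout assumed):
#
#     >>> x = (0, MPZ(13), -2, 4)                      # 3.25
#     >>> mpf_mod(x, fone, 53) == (0, MPZ(1), -2, 1)   # 0.25
#     True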
reciprocal_rnd = {
round_down : round_up,
round_up : round_down,
round_floor : round_ceiling,
round_ceiling : round_floor,
round_nearest : round_nearest
}
negative_rnd = {
round_down : round_down,
round_up : round_up,
round_floor : round_ceiling,
round_ceiling : round_floor,
round_nearest : round_nearest
}
def mpf_pow_int(s, n, prec, rnd=round_fast):
"""Compute s**n, where s is a raw mpf and n is a Python integer."""
sign, man, exp, bc = s
if (not man) and exp:
if s == finf:
if n > 0: return s
if n == 0: return fnan
return fzero
if s == fninf:
if n > 0: return [finf, fninf][n & 1]
if n == 0: return fnan
return fzero
return fnan
n = int(n)
if n == 0: return fone
if n == 1: return mpf_pos(s, prec, rnd)
if n == 2:
_, man, exp, bc = s
if not man:
return fzero
man = man*man
if man == 1:
return (0, MPZ_ONE, exp+exp, 1)
bc = bc + bc - 2
bc += bctable[int(man>>bc)]
return normalize1(0, man, exp+exp, bc, prec, rnd)
if n == -1: return mpf_div(fone, s, prec, rnd)
if n < 0:
inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
return mpf_div(fone, inverse, prec, rnd)
result_sign = sign & n
# Use exact integer power when the exact mantissa is small
if man == 1:
return (result_sign, MPZ_ONE, exp*n, 1)
if bc*n < 1000:
man **= n
return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
# Use directed rounding all the way through to maintain rigorous
# bounds for interval arithmetic
rounds_down = (rnd == round_nearest) or \
shifts_down[rnd][result_sign]
# Now we perform binary exponentiation. Need to estimate precision
# to avoid rounding errors from temporary operations. Roughly log_2(n)
# operations are performed.
workprec = prec + 4*bitcount(n) + 4
_, pm, pe, pbc = fone
while 1:
if n & 1:
pm = pm*man
pe = pe+exp
pbc += bc - 2
pbc = pbc + bctable[int(pm >> pbc)]
if pbc > workprec:
if rounds_down:
pm = pm >> (pbc-workprec)
else:
pm = -((-pm) >> (pbc-workprec))
pe += pbc - workprec
pbc = workprec
n -= 1
if not n:
break
man = man*man
exp = exp+exp
bc = bc + bc - 2
bc = bc + bctable[int(man >> bc)]
if bc > workprec:
if rounds_down:
man = man >> (bc-workprec)
else:
man = -((-man) >> (bc-workprec))
exp += bc - workprec
bc = workprec
n = n // 2
return normalize(result_sign, pm, pe, pbc, prec, rnd)
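# Hedged sketch (assumes the from_int helper defined elsewhere in this
# module): small powers with bc*n < 1000 take the exact integer path, so
# the result is exact even at finite precision:
#
#     >>> mpf_pow_int(from_int(3), 5, 53) == from_int(243)
#     True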
def mpf_perturb(x, eps_sign, prec, rnd):
"""
For nonzero x, calculate x + eps with directed rounding, where
eps < prec relatively and eps has the given sign (0 for
positive, 1 for negative).
With rounding to nearest, this is taken to simply normalize
x to the given precision.
"""
if rnd == round_nearest:
return mpf_pos(x, prec, rnd)
sign, man, exp, bc = x
eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
if sign:
away = (rnd in (round_down, round_ceiling)) ^ eps_sign
else:
away = (rnd in (round_up, round_ceiling)) ^ eps_sign
if away:
return mpf_add(x, eps, prec, rnd)
else:
return mpf_pos(x, prec, rnd)
#----------------------------------------------------------------------------#
# Radix conversion #
#----------------------------------------------------------------------------#
def to_digits_exp(s, dps):
"""Helper function for representing the floating-point number s as
a decimal with dps digits. Returns (sign, string, exponent) where
sign is '' or '-', string is the digit string, and exponent is
the decimal exponent as an int.
If inexact, the decimal representation is rounded toward zero."""
# Extract sign first so it doesn't mess up the string digit count
if s[0]:
sign = '-'
s = mpf_neg(s)
else:
sign = ''
_sign, man, exp, bc = s
if not man:
return '', '0', 0
bitprec = int(dps * math.log(10,2)) + 10
# Cut down to size
# TODO: account for precision when doing this
exp_from_1 = exp + bc
if abs(exp_from_1) > 3500:
from .libelefun import mpf_ln2, mpf_ln10
# Set b = int(exp * log(2)/log(10))
# If exp is huge, we must use high-precision arithmetic to
# find the nearest power of ten
expprec = bitcount(abs(exp)) + 5
tmp = from_int(exp)
tmp = mpf_mul(tmp, mpf_ln2(expprec))
tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
b = to_int(tmp)
s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
_sign, man, exp, bc = s
exponent = b
else:
exponent = 0
# First, calculate mantissa digits by converting to a binary
# fixed-point number and then converting that number to
# a decimal fixed-point number.
fixprec = max(bitprec - exp - bc, 0)
fixdps = int(fixprec / math.log(10,2) + 0.5)
sf = to_fixed(s, fixprec)
sd = bin_to_radix(sf, fixprec, 10, fixdps)
digits = numeral(sd, base=10, size=dps)
exponent += len(digits) - fixdps - 1
return sign, digits, exponent
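# Hedged sketch of the helper's output format (sign, digit string, decimal
# exponent); note the digit string carries guard digits beyond dps:
#
#     >>> to_digits_exp((0, MPZ(3), -1, 2), 5)    # 1.5, five digits wanted
#     ('', '150000000', 0)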
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
show_zero_exponent=False):
"""
Convert a raw mpf to a decimal floating-point literal with at
most `dps` decimal digits in the mantissa (not counting extra zeros
that may be inserted for visual purposes).
The number will be printed in fixed-point format if the position
of the leading digit is strictly between min_fixed
(default = min(-dps/3,-5)) and max_fixed (default = dps).
To force fixed-point format always, set min_fixed = -inf,
max_fixed = +inf. To force floating-point format, set
min_fixed >= max_fixed.
The literal is formatted so that it can be parsed back to a number
    by from_str, float() or Decimal().
"""
# Special numbers
if not s[1]:
if s == fzero:
if dps: t = '0.0'
else: t = '.0'
if show_zero_exponent:
t += 'e+0'
return t
if s == finf: return '+inf'
if s == fninf: return '-inf'
if s == fnan: return 'nan'
raise ValueError
if min_fixed is None: min_fixed = min(-(dps//3), -5)
if max_fixed is None: max_fixed = dps
# to_digits_exp rounds to floor.
# This sometimes kills some instances of "...00001"
sign, digits, exponent = to_digits_exp(s, dps+3)
# No digits: show only .0; round exponent to nearest
if not dps:
if digits[0] in '56789':
exponent += 1
digits = ".0"
else:
# Rounding up kills some instances of "...99999"
if len(digits) > dps and digits[dps] in '56789' and \
(dps < 500 or digits[dps-4:dps] == '9999'):
digits2 = str(int(digits[:dps]) + 1)
if len(digits2) > dps:
digits2 = digits2[:dps]
exponent += 1
digits = digits2
else:
digits = digits[:dps]
# Prettify numbers close to unit magnitude
if min_fixed < exponent < max_fixed:
if exponent < 0:
digits = ("0"*int(-exponent)) + digits
split = 1
else:
split = exponent + 1
if split > dps:
digits += "0"*(split-dps)
exponent = 0
else:
split = 1
digits = (digits[:split] + "." + digits[split:])
if strip_zeros:
# Clean up trailing zeros
digits = digits.rstrip('0')
if digits[-1] == ".":
digits += "0"
if exponent == 0 and dps and not show_zero_exponent: return sign + digits
if exponent >= 0: return sign + digits + "e+" + str(exponent)
if exponent < 0: return sign + digits + "e" + str(exponent)
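# Hedged round-trip sketch (assumes from_str defined below): to_str emits
# literals that float() parses back to the same double-precision value:
#
#     >>> x = from_str('1234.5', 53)
#     >>> to_str(x, 15)
#     '1234.5'
#     >>> float(to_str(x, 15)) == 1234.5
#     True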
def str_to_man_exp(x, base=10):
"""Helper function for from_str."""
# Verify that the input is a valid float literal
float(x)
# Split into mantissa, exponent
x = x.lower()
parts = x.split('e')
if len(parts) == 1:
exp = 0
else: # == 2
x = parts[0]
exp = int(parts[1])
# Look for radix point in mantissa
parts = x.split('.')
if len(parts) == 2:
a, b = parts[0], parts[1].rstrip('0')
exp -= len(b)
x = a + b
x = MPZ(int(x, base))
return x, exp
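# Hedged sketch: the helper splits a literal into an integer mantissa and a
# decimal exponent, so '1.25' denotes 125 * 10**-2:
#
#     >>> man, exp = str_to_man_exp('1.25')
#     >>> (man, exp) == (125, -2)
#     True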
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}
def from_str(x, prec, rnd=round_fast):
"""Create a raw mpf from a decimal literal, rounding in the
specified direction if the input number cannot be represented
exactly as a binary floating-point number with the given number of
bits. The literal syntax accepted is the same as for Python
floats.
TODO: the rounding does not work properly for large exponents.
"""
x = x.strip()
if x in special_str:
return special_str[x]
if '/' in x:
p, q = x.split('/')
return from_rational(int(p), int(q), prec, rnd)
man, exp = str_to_man_exp(x, base=10)
# XXX: appropriate cutoffs & track direction
# note no factors of 5
if abs(exp) > 400:
s = from_int(man, prec+10)
s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
else:
if exp >= 0:
s = from_int(man * 10**exp, prec, rnd)
else:
s = from_rational(man, 10**-exp, prec, rnd)
return s
# Binary string conversion. These are currently mainly used for debugging
# and could use some improvement in the future
def from_bstr(x):
man, exp = str_to_man_exp(x, base=2)
man = MPZ(man)
sign = 0
if man < 0:
man = -man
sign = 1
bc = bitcount(man)
return normalize(sign, man, exp, bc, bc, round_floor)
def to_bstr(x):
sign, man, exp, bc = x
return ['','-'][sign] + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp)
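# Hedged round-trip sketch for the binary-string helpers: '101e-2' denotes
# 0b101 * 2**-2 == 1.25:
#
#     >>> to_bstr(from_bstr('101e-2'))
#     '101e-2'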
#----------------------------------------------------------------------------#
# Square roots #
#----------------------------------------------------------------------------#
def mpf_sqrt(s, prec, rnd=round_fast):
"""
Compute the square root of a nonnegative mpf value. The
result is correctly rounded.
"""
sign, man, exp, bc = s
if sign:
raise ComplexResult("square root of a negative number")
if not man:
return s
if exp & 1:
exp -= 1
man <<= 1
bc += 1
elif man == 1:
return normalize1(sign, man, exp//2, bc, prec, rnd)
shift = max(4, 2*prec-bc+4)
shift += shift & 1
if rnd in 'fd':
man = isqrt(man<<shift)
else:
man, rem = sqrtrem(man<<shift)
# Perturb up
if rem:
man = (man<<1)+1
shift += 2
return from_man_exp(man, (exp-shift)//2, prec, rnd)
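# Hedged usage sketch (assumes the from_int helper defined elsewhere in
# this module); the output shown is what correct rounding should give:
#
#     >>> r = mpf_sqrt(from_int(2), 53, round_nearest)
#     >>> to_str(r, 15)
#     '1.4142135623731'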
def mpf_hypot(x, y, prec, rnd=round_fast):
"""Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
x and y."""
if y == fzero: return mpf_abs(x, prec, rnd)
if x == fzero: return mpf_abs(y, prec, rnd)
hypot2 = mpf_add(mpf_mul(x,x), mpf_mul(y,y), prec+4)
return mpf_sqrt(hypot2, prec, rnd)
if BACKEND == 'sage':
try:
import sage.libs.mpmath.ext_libmp as ext_lib
mpf_add = ext_lib.mpf_add
mpf_sub = ext_lib.mpf_sub
mpf_mul = ext_lib.mpf_mul
mpf_div = ext_lib.mpf_div
mpf_sqrt = ext_lib.mpf_sqrt
except ImportError:
pass
# ===== end of file: altanalyze/stats_scripts/mpmath/libmp/libmpf.py =====
import os
import sys
#----------------------------------------------------------------------------#
# Support GMPY for high-speed large integer arithmetic. #
# #
# To allow an external module to handle arithmetic, we need to make sure #
# that all high-precision variables are declared of the correct type. MPZ #
# is the constructor for the high-precision type. It defaults to Python's #
# long type but can be assigned another type, typically gmpy.mpz.           #
# #
# MPZ must be used for the mantissa component of an mpf and must be used #
# for internal fixed-point operations. #
# #
# Side-effects #
# 1) "is" cannot be used to test for special values. Must use "==". #
# 2) There are bugs in GMPY prior to v1.02 so we must use v1.03 or later. #
#----------------------------------------------------------------------------#
# So we can import it from this module
gmpy = None
sage = None
sage_utils = None
try:
xrange
python3 = False
except NameError:
python3 = True
BACKEND = 'python'
if not python3:
MPZ = long
xrange = xrange
basestring = basestring
from .exec_py2 import exec_
else:
MPZ = int
xrange = range
basestring = str
from .exec_py3 import exec_
# Define constants for calculating hash on Python 3.2.
if sys.version_info >= (3, 2):
HASH_MODULUS = sys.hash_info.modulus
if sys.hash_info.width == 32:
HASH_BITS = 31
else:
HASH_BITS = 61
else:
HASH_MODULUS = None
HASH_BITS = None
if 'MPMATH_NOGMPY' not in os.environ:
try:
try:
import gmpy2 as gmpy
except ImportError:
try:
import gmpy
except ImportError:
raise ImportError
if gmpy.version() >= '1.03':
BACKEND = 'gmpy'
MPZ = gmpy.mpz
except:
pass
if 'MPMATH_NOSAGE' not in os.environ:
try:
import sage.all
import sage.libs.mpmath.utils as _sage_utils
sage = sage.all
sage_utils = _sage_utils
BACKEND = 'sage'
MPZ = sage.Integer
except:
pass
if 'MPMATH_STRICT' in os.environ:
STRICT = True
else:
STRICT = False
MPZ_TYPE = type(MPZ(0))
MPZ_ZERO = MPZ(0)
MPZ_ONE = MPZ(1)
MPZ_TWO = MPZ(2)
MPZ_THREE = MPZ(3)
MPZ_FIVE = MPZ(5)
try:
if BACKEND == 'python':
int_types = (int, long)
else:
int_types = (int, long, MPZ_TYPE)
except NameError:
if BACKEND == 'python':
int_types = (int,)
else:
int_types = (int, MPZ_TYPE)
# ===== end of file: altanalyze/stats_scripts/mpmath/libmp/backend.py =====
from ..libmp.backend import xrange
# TODO: should use diagonalization-based algorithms
class MatrixCalculusMethods:
def _exp_pade(ctx, a):
"""
Exponential of a matrix using Pade approximants.
See G. H. Golub, C. F. van Loan 'Matrix Computations',
third Ed., page 572
TODO:
- find a good estimate for q
- reduce the number of matrix multiplications to improve
performance
"""
def eps_pade(p):
return ctx.mpf(2)**(3-2*p) * \
ctx.factorial(p)**2/(ctx.factorial(2*p)**2 * (2*p + 1))
q = 4
extraq = 8
while 1:
if eps_pade(q) < ctx.eps:
break
q += 1
q += extraq
j = int(max(1, ctx.mag(ctx.mnorm(a,'inf'))))
extra = q
prec = ctx.prec
ctx.dps += extra + 3
try:
a = a/2**j
na = a.rows
den = ctx.eye(na)
num = ctx.eye(na)
x = ctx.eye(na)
c = ctx.mpf(1)
for k in range(1, q+1):
c *= ctx.mpf(q - k + 1)/((2*q - k + 1) * k)
x = a*x
cx = c*x
num += cx
den += (-1)**k * cx
f = ctx.lu_solve_mat(den, num)
for k in range(j):
f = f*f
finally:
ctx.prec = prec
return f*1
def expm(ctx, A, method='taylor'):
r"""
Computes the matrix exponential of a square matrix `A`, which is defined
by the power series
.. math ::
\exp(A) = I + A + \frac{A^2}{2!} + \frac{A^3}{3!} + \ldots
With method='taylor', the matrix exponential is computed
using the Taylor series. With method='pade', Pade approximants
are used instead.
**Examples**
Basic examples::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> expm(zeros(3))
[1.0 0.0 0.0]
[0.0 1.0 0.0]
[0.0 0.0 1.0]
>>> expm(eye(3))
[2.71828182845905 0.0 0.0]
[ 0.0 2.71828182845905 0.0]
[ 0.0 0.0 2.71828182845905]
>>> expm([[1,1,0],[1,0,1],[0,1,0]])
[ 3.86814500615414 2.26812870852145 0.841130841230196]
[ 2.26812870852145 2.44114713886289 1.42699786729125]
[0.841130841230196 1.42699786729125 1.6000162976327]
>>> expm([[1,1,0],[1,0,1],[0,1,0]], method='pade')
[ 3.86814500615414 2.26812870852145 0.841130841230196]
[ 2.26812870852145 2.44114713886289 1.42699786729125]
[0.841130841230196 1.42699786729125 1.6000162976327]
>>> expm([[1+j, 0], [1+j,1]])
[(1.46869393991589 + 2.28735528717884j) 0.0]
[ (1.03776739863568 + 3.536943175722j) (2.71828182845905 + 0.0j)]
Matrices with large entries are allowed::
>>> expm(matrix([[1,2],[2,3]])**25)
[5.65024064048415e+2050488462815550 9.14228140091932e+2050488462815550]
[9.14228140091932e+2050488462815550 1.47925220414035e+2050488462815551]
The identity `\exp(A+B) = \exp(A) \exp(B)` does not hold for
noncommuting matrices::
>>> A = hilbert(3)
>>> B = A + eye(3)
>>> chop(mnorm(A*B - B*A))
0.0
>>> chop(mnorm(expm(A+B) - expm(A)*expm(B)))
0.0
>>> B = A + ones(3)
>>> mnorm(A*B - B*A)
1.8
>>> mnorm(expm(A+B) - expm(A)*expm(B))
42.0927851137247
"""
A = ctx.matrix(A)
if method == 'pade':
return ctx._exp_pade(A)
prec = ctx.prec
j = int(max(1, ctx.mag(ctx.mnorm(A,'inf'))))
j += int(0.5*prec**0.5)
try:
ctx.prec += 10 + 2*j
tol = +ctx.eps
A = A/2**j
T = A
Y = A**0 + A
k = 2
while 1:
T *= A * (1/ctx.mpf(k))
if ctx.mnorm(T, 'inf') < tol:
break
Y += T
k += 1
for k in xrange(j):
Y = Y*Y
finally:
ctx.prec = prec
Y *= 1
return Y
def cosm(ctx, A):
r"""
Gives the cosine of a square matrix `A`, defined in analogy
with the matrix exponential.
Examples::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> X = eye(3)
>>> cosm(X)
[0.54030230586814 0.0 0.0]
[ 0.0 0.54030230586814 0.0]
[ 0.0 0.0 0.54030230586814]
>>> X = hilbert(3)
>>> cosm(X)
[ 0.424403834569555 -0.316643413047167 -0.221474945949293]
[-0.316643413047167 0.820646708837824 -0.127183694770039]
[-0.221474945949293 -0.127183694770039 0.909236687217541]
>>> X = matrix([[1+j,-2],[0,-j]])
>>> cosm(X)
[(0.833730025131149 - 0.988897705762865j) (1.07485840848393 - 0.17192140544213j)]
[ 0.0 (1.54308063481524 + 0.0j)]
"""
B = 0.5 * (ctx.expm(A*ctx.j) + ctx.expm(A*(-ctx.j)))
if not sum(A.apply(ctx.im).apply(abs)):
B = B.apply(ctx.re)
return B
def sinm(ctx, A):
r"""
Gives the sine of a square matrix `A`, defined in analogy
with the matrix exponential.
Examples::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> X = eye(3)
>>> sinm(X)
[0.841470984807897 0.0 0.0]
[ 0.0 0.841470984807897 0.0]
[ 0.0 0.0 0.841470984807897]
>>> X = hilbert(3)
>>> sinm(X)
[0.711608512150994 0.339783913247439 0.220742837314741]
[0.339783913247439 0.244113865695532 0.187231271174372]
[0.220742837314741 0.187231271174372 0.155816730769635]
>>> X = matrix([[1+j,-2],[0,-j]])
>>> sinm(X)
[(1.29845758141598 + 0.634963914784736j) (-1.96751511930922 + 0.314700021761367j)]
[ 0.0 (0.0 - 1.1752011936438j)]
"""
B = (-0.5j) * (ctx.expm(A*ctx.j) - ctx.expm(A*(-ctx.j)))
if not sum(A.apply(ctx.im).apply(abs)):
B = B.apply(ctx.re)
return B
def _sqrtm_rot(ctx, A, _may_rotate):
# If the iteration fails to converge, cheat by performing
# a rotation by a complex number
u = ctx.j**0.3
return ctx.sqrtm(u*A, _may_rotate) / ctx.sqrt(u)
def sqrtm(ctx, A, _may_rotate=2):
r"""
Computes a square root of the square matrix `A`, i.e. returns
a matrix `B = A^{1/2}` such that `B^2 = A`. The square root
of a matrix, if it exists, is not unique.
**Examples**
Square roots of some simple matrices::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sqrtm([[1,0], [0,1]])
[1.0 0.0]
[0.0 1.0]
>>> sqrtm([[0,0], [0,0]])
[0.0 0.0]
[0.0 0.0]
>>> sqrtm([[2,0],[0,1]])
[1.4142135623731 0.0]
[ 0.0 1.0]
>>> sqrtm([[1,1],[1,0]])
[ (0.920442065259926 - 0.21728689675164j) (0.568864481005783 + 0.351577584254143j)]
[(0.568864481005783 + 0.351577584254143j) (0.351577584254143 - 0.568864481005783j)]
>>> sqrtm([[1,0],[0,1]])
[1.0 0.0]
[0.0 1.0]
>>> sqrtm([[-1,0],[0,1]])
[(0.0 - 1.0j) 0.0]
[ 0.0 (1.0 + 0.0j)]
>>> sqrtm([[j,0],[0,j]])
[(0.707106781186547 + 0.707106781186547j) 0.0]
[ 0.0 (0.707106781186547 + 0.707106781186547j)]
A square root of a rotation matrix, giving the corresponding
half-angle rotation matrix::
>>> t1 = 0.75
>>> t2 = t1 * 0.5
>>> A1 = matrix([[cos(t1), -sin(t1)], [sin(t1), cos(t1)]])
>>> A2 = matrix([[cos(t2), -sin(t2)], [sin(t2), cos(t2)]])
>>> sqrtm(A1)
[0.930507621912314 -0.366272529086048]
[0.366272529086048 0.930507621912314]
>>> A2
[0.930507621912314 -0.366272529086048]
[0.366272529086048 0.930507621912314]
The identity `(A^2)^{1/2} = A` does not necessarily hold::
>>> A = matrix([[4,1,4],[7,8,9],[10,2,11]])
>>> sqrtm(A**2)
[ 4.0 1.0 4.0]
[ 7.0 8.0 9.0]
[10.0 2.0 11.0]
>>> sqrtm(A)**2
[ 4.0 1.0 4.0]
[ 7.0 8.0 9.0]
[10.0 2.0 11.0]
>>> A = matrix([[-4,1,4],[7,-8,9],[10,2,11]])
>>> sqrtm(A**2)
[ 7.43715112194995 -0.324127569985474 1.8481718827526]
[-0.251549715716942 9.32699765900402 2.48221180985147]
[ 4.11609388833616 0.775751877098258 13.017955697342]
>>> chop(sqrtm(A)**2)
[-4.0 1.0 4.0]
[ 7.0 -8.0 9.0]
[10.0 2.0 11.0]
For some matrices, a square root does not exist::
>>> sqrtm([[0,1], [0,0]])
Traceback (most recent call last):
...
ZeroDivisionError: matrix is numerically singular
Two examples from the documentation for Matlab's ``sqrtm``::
>>> mp.dps = 15; mp.pretty = True
>>> sqrtm([[7,10],[15,22]])
[1.56669890360128 1.74077655955698]
[2.61116483933547 4.17786374293675]
>>>
>>> X = matrix(\
... [[5,-4,1,0,0],
... [-4,6,-4,1,0],
... [1,-4,6,-4,1],
... [0,1,-4,6,-4],
... [0,0,1,-4,5]])
>>> Y = matrix(\
... [[2,-1,-0,-0,-0],
... [-1,2,-1,0,-0],
... [0,-1,2,-1,0],
... [-0,0,-1,2,-1],
... [-0,-0,-0,-1,2]])
>>> mnorm(sqrtm(X) - Y)
4.53155328326114e-19
"""
A = ctx.matrix(A)
# Trivial
if A*0 == A:
return A
prec = ctx.prec
if _may_rotate:
d = ctx.det(A)
if abs(ctx.im(d)) < 16*ctx.eps and ctx.re(d) < 0:
return ctx._sqrtm_rot(A, _may_rotate-1)
try:
ctx.prec += 10
tol = ctx.eps * 128
Y = A
Z = I = A**0
k = 0
# Denman-Beavers iteration
while 1:
Yprev = Y
try:
Y, Z = 0.5*(Y+ctx.inverse(Z)), 0.5*(Z+ctx.inverse(Y))
except ZeroDivisionError:
if _may_rotate:
Y = ctx._sqrtm_rot(A, _may_rotate-1)
break
else:
raise
mag1 = ctx.mnorm(Y-Yprev, 'inf')
mag2 = ctx.mnorm(Y, 'inf')
if mag1 <= mag2*tol:
break
if _may_rotate and k > 6 and not mag1 < mag2 * 0.001:
return ctx._sqrtm_rot(A, _may_rotate-1)
k += 1
if k > ctx.prec:
raise ctx.NoConvergence
finally:
ctx.prec = prec
Y *= 1
return Y
def logm(ctx, A):
r"""
Computes a logarithm of the square matrix `A`, i.e. returns
a matrix `B = \log(A)` such that `\exp(B) = A`. The logarithm
of a matrix, if it exists, is not unique.
**Examples**
Logarithms of some simple matrices::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> X = eye(3)
>>> logm(X)
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
>>> logm(2*X)
[0.693147180559945 0.0 0.0]
[ 0.0 0.693147180559945 0.0]
[ 0.0 0.0 0.693147180559945]
>>> logm(expm(X))
[1.0 0.0 0.0]
[0.0 1.0 0.0]
[0.0 0.0 1.0]
A logarithm of a complex matrix::
>>> X = matrix([[2+j, 1, 3], [1-j, 1-2*j, 1], [-4, -5, j]])
>>> B = logm(X)
>>> nprint(B)
[ (0.808757 + 0.107759j) (2.20752 + 0.202762j) (1.07376 - 0.773874j)]
[ (0.905709 - 0.107795j) (0.0287395 - 0.824993j) (0.111619 + 0.514272j)]
[(-0.930151 + 0.399512j) (-2.06266 - 0.674397j) (0.791552 + 0.519839j)]
>>> chop(expm(B))
[(2.0 + 1.0j) 1.0 3.0]
[(1.0 - 1.0j) (1.0 - 2.0j) 1.0]
[ -4.0 -5.0 (0.0 + 1.0j)]
A matrix `X` close to the identity matrix, for which
`\log(\exp(X)) = \exp(\log(X)) = X` holds::
>>> X = eye(3) + hilbert(3)/4
>>> X
[ 1.25 0.125 0.0833333333333333]
[ 0.125 1.08333333333333 0.0625]
[0.0833333333333333 0.0625 1.05]
>>> logm(expm(X))
[ 1.25 0.125 0.0833333333333333]
[ 0.125 1.08333333333333 0.0625]
[0.0833333333333333 0.0625 1.05]
>>> expm(logm(X))
[ 1.25 0.125 0.0833333333333333]
[ 0.125 1.08333333333333 0.0625]
[0.0833333333333333 0.0625 1.05]
A logarithm of a rotation matrix, giving back the angle of
the rotation::
>>> t = 3.7
>>> A = matrix([[cos(t),sin(t)],[-sin(t),cos(t)]])
>>> chop(logm(A))
[ 0.0 -2.58318530717959]
[2.58318530717959 0.0]
>>> (2*pi-t)
2.58318530717959
For some matrices, a logarithm does not exist::
>>> logm([[1,0], [0,0]])
Traceback (most recent call last):
...
ZeroDivisionError: matrix is numerically singular
Logarithm of a matrix with large entries::
>>> logm(hilbert(3) * 10**20).apply(re)
[ 45.5597513593433 1.27721006042799 0.317662687717978]
[ 1.27721006042799 42.5222778973542 2.24003708791604]
[0.317662687717978 2.24003708791604 42.395212822267]
"""
A = ctx.matrix(A)
prec = ctx.prec
try:
ctx.prec += 10
tol = ctx.eps * 128
I = A**0
B = A
n = 0
while 1:
B = ctx.sqrtm(B)
n += 1
if ctx.mnorm(B-I, 'inf') < 0.125:
break
T = X = B-I
L = X*0
k = 1
while 1:
if k & 1:
L += T / k
else:
L -= T / k
T *= X
if ctx.mnorm(T, 'inf') < tol:
break
k += 1
if k > ctx.prec:
raise ctx.NoConvergence
finally:
ctx.prec = prec
L *= 2**n
return L
def powm(ctx, A, r):
r"""
Computes `A^r = \exp(A \log r)` for a matrix `A` and complex
number `r`.
**Examples**
Powers and inverse powers of a matrix::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> A = matrix([[4,1,4],[7,8,9],[10,2,11]])
>>> powm(A, 2)
[ 63.0 20.0 69.0]
[174.0 89.0 199.0]
[164.0 48.0 179.0]
>>> chop(powm(powm(A, 4), 1/4.))
[ 4.0 1.0 4.0]
[ 7.0 8.0 9.0]
[10.0 2.0 11.0]
>>> powm(extraprec(20)(powm)(A, -4), -1/4.)
[ 4.0 1.0 4.0]
[ 7.0 8.0 9.0]
[10.0 2.0 11.0]
>>> chop(powm(powm(A, 1+0.5j), 1/(1+0.5j)))
[ 4.0 1.0 4.0]
[ 7.0 8.0 9.0]
[10.0 2.0 11.0]
>>> powm(extraprec(5)(powm)(A, -1.5), -1/(1.5))
[ 4.0 1.0 4.0]
[ 7.0 8.0 9.0]
[10.0 2.0 11.0]
A Fibonacci-generating matrix::
>>> powm([[1,1],[1,0]], 10)
[89.0 55.0]
[55.0 34.0]
>>> fib(10)
55.0
>>> powm([[1,1],[1,0]], 6.5)
[(16.5166626964253 - 0.0121089837381789j) (10.2078589271083 + 0.0195927472575932j)]
[(10.2078589271083 + 0.0195927472575932j) (6.30880376931698 - 0.0317017309957721j)]
>>> (phi**6.5 - (1-phi)**6.5)/sqrt(5)
(10.2078589271083 - 0.0195927472575932j)
>>> powm([[1,1],[1,0]], 6.2)
[ (14.3076953002666 - 0.008222855781077j) (8.81733464837593 + 0.0133048601383712j)]
[(8.81733464837593 + 0.0133048601383712j) (5.49036065189071 - 0.0215277159194482j)]
>>> (phi**6.2 - (1-phi)**6.2)/sqrt(5)
(8.81733464837593 - 0.0133048601383712j)
"""
A = ctx.matrix(A)
r = ctx.convert(r)
prec = ctx.prec
try:
ctx.prec += 10
if ctx.isint(r):
v = A ** int(r)
elif ctx.isint(r*2):
y = int(r*2)
v = ctx.sqrtm(A) ** y
else:
v = ctx.expm(r*ctx.logm(A))
finally:
ctx.prec = prec
v *= 1
return v
# ===== end of file: altanalyze/stats_scripts/mpmath/matrices/calculus.py =====
from ..libmp.backend import xrange
# TODO: interpret list as vectors (for multiplication)
rowsep = '\n'
colsep = ' '
class _matrix(object):
"""
Numerical matrix.
Specify the dimensions or the data as a nested list.
Elements default to zero.
Use a flat list to create a column vector easily.
By default, only mpf is used to store the data. You can specify another type
using force_type=type. It's possible to specify None.
Make sure force_type(force_type()) is fast.
Creating matrices
-----------------
Matrices in mpmath are implemented using dictionaries. Only non-zero values
are stored, so it is cheap to represent sparse matrices.
The most basic way to create one is to use the ``matrix`` class directly.
You can create an empty matrix specifying the dimensions:
>>> from mpmath import *
>>> mp.dps = 15
>>> matrix(2)
matrix(
[['0.0', '0.0'],
['0.0', '0.0']])
>>> matrix(2, 3)
matrix(
[['0.0', '0.0', '0.0'],
['0.0', '0.0', '0.0']])
Calling ``matrix`` with one dimension will create a square matrix.
To access the dimensions of a matrix, use the ``rows`` or ``cols`` keyword:
>>> A = matrix(3, 2)
>>> A
matrix(
[['0.0', '0.0'],
['0.0', '0.0'],
['0.0', '0.0']])
>>> A.rows
3
>>> A.cols
2
    You can also change the dimensions of an existing matrix. This will set the
    new elements to 0. If the new dimension is smaller than before, the
    surplus elements are discarded:
>>> A.rows = 2
>>> A
matrix(
[['0.0', '0.0'],
['0.0', '0.0']])
Internally ``mpmathify`` is used every time an element is set. This
is done using the syntax A[row,column], counting from 0:
>>> A = matrix(2)
>>> A[1,1] = 1 + 1j
>>> A
matrix(
[['0.0', '0.0'],
['0.0', '(1.0 + 1.0j)']])
You can use the keyword ``force_type`` to change the function which is
called on every new element:
>>> matrix(2, 5, force_type=int)
matrix(
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
A more comfortable way to create a matrix lets you use nested lists:
>>> matrix([[1, 2], [3, 4]])
matrix(
[['1.0', '2.0'],
['3.0', '4.0']])
If you want to preserve the type of the elements you can use
``force_type=None``:
>>> matrix([[1, 2.5], [1j, mpf(2)]], force_type=None)
matrix(
[[1, 2.5],
[1j, '2.0']])
Convenient advanced functions are available for creating various standard
matrices, see ``zeros``, ``ones``, ``diag``, ``eye``, ``randmatrix`` and
``hilbert``.
Vectors
.......
Vectors may also be represented by the ``matrix`` class (with rows = 1 or cols = 1).
For vectors there are some things which make life easier. A column vector can
    be created using a flat list, a row vector using an almost flat nested list::
>>> matrix([1, 2, 3])
matrix(
[['1.0'],
['2.0'],
['3.0']])
>>> matrix([[1, 2, 3]])
matrix(
[['1.0', '2.0', '3.0']])
Optionally vectors can be accessed like lists, using only a single index::
>>> x = matrix([1, 2, 3])
>>> x[1]
mpf('2.0')
>>> x[1,0]
mpf('2.0')
Other
.....
    As you probably expected, matrices can be printed::
>>> print randmatrix(3) # doctest:+SKIP
[ 0.782963853573023 0.802057689719883 0.427895717335467]
[0.0541876859348597 0.708243266653103 0.615134039977379]
[ 0.856151514955773 0.544759264818486 0.686210904770947]
Use ``nstr`` or ``nprint`` to specify the number of digits to print::
>>> nprint(randmatrix(5), 3) # doctest:+SKIP
[2.07e-1 1.66e-1 5.06e-1 1.89e-1 8.29e-1]
[6.62e-1 6.55e-1 4.47e-1 4.82e-1 2.06e-2]
[4.33e-1 7.75e-1 6.93e-2 2.86e-1 5.71e-1]
[1.01e-1 2.53e-1 6.13e-1 3.32e-1 2.59e-1]
[1.56e-1 7.27e-2 6.05e-1 6.67e-2 2.79e-1]
As matrices are mutable, you will need to copy them sometimes::
>>> A = matrix(2)
>>> A
matrix(
[['0.0', '0.0'],
['0.0', '0.0']])
>>> B = A.copy()
>>> B[0,0] = 1
>>> B
matrix(
[['1.0', '0.0'],
['0.0', '0.0']])
>>> A
matrix(
[['0.0', '0.0'],
['0.0', '0.0']])
Finally, it is possible to convert a matrix to a nested list. This is very useful,
as most Python libraries involving matrices or arrays (namely NumPy or SymPy)
support this format::
>>> B.tolist()
[[mpf('1.0'), mpf('0.0')], [mpf('0.0'), mpf('0.0')]]
Matrix operations
-----------------
You can add and subtract matrices of compatible dimensions::
>>> A = matrix([[1, 2], [3, 4]])
>>> B = matrix([[-2, 4], [5, 9]])
>>> A + B
matrix(
[['-1.0', '6.0'],
['8.0', '13.0']])
>>> A - B
matrix(
[['3.0', '-2.0'],
['-2.0', '-5.0']])
>>> A + ones(3) # doctest:+ELLIPSIS
Traceback (most recent call last):
...
ValueError: incompatible dimensions for addition
It is possible to multiply or add matrices and scalars. In the latter case the
operation will be done element-wise::
>>> A * 2
matrix(
[['2.0', '4.0'],
['6.0', '8.0']])
>>> A / 4
matrix(
[['0.25', '0.5'],
['0.75', '1.0']])
>>> A - 1
matrix(
[['0.0', '1.0'],
['2.0', '3.0']])
Of course you can perform matrix multiplication, if the dimensions are
compatible::
>>> A * B
matrix(
[['8.0', '22.0'],
['14.0', '48.0']])
>>> matrix([[1, 2, 3]]) * matrix([[-6], [7], [-2]])
matrix(
[['2.0']])
You can raise powers of square matrices::
>>> A**2
matrix(
[['7.0', '10.0'],
['15.0', '22.0']])
Negative powers will calculate the inverse::
>>> A**-1
matrix(
[['-2.0', '1.0'],
['1.5', '-0.5']])
>>> A * A**-1
matrix(
[['1.0', '1.0842021724855e-19'],
['-2.16840434497101e-19', '1.0']])
Matrix transposition is straightforward::
>>> A = ones(2, 3)
>>> A
matrix(
[['1.0', '1.0', '1.0'],
['1.0', '1.0', '1.0']])
>>> A.T
matrix(
[['1.0', '1.0'],
['1.0', '1.0'],
['1.0', '1.0']])
Norms
.....
Sometimes you need to know how "large" a matrix or vector is. Due to their
multidimensional nature it's not possible to compare them, but there are
several functions to map a matrix or a vector to a positive real number, the
so called norms.
    For vectors the p-norm is used; the 1-, the 2- and the oo-norm are the
    most common choices.
>>> x = matrix([-10, 2, 100])
>>> norm(x, 1)
mpf('112.0')
>>> norm(x, 2)
mpf('100.5186549850325')
>>> norm(x, inf)
mpf('100.0')
Please note that the 2-norm is the most used one, though it is more expensive
to calculate than the 1- or oo-norm.
    It is possible to generalize some vector norms to matrix norms::
>>> A = matrix([[1, -1000], [100, 50]])
>>> mnorm(A, 1)
mpf('1050.0')
>>> mnorm(A, inf)
mpf('1001.0')
>>> mnorm(A, 'F')
mpf('1006.2310867787777')
    The last norm (the "Frobenius norm") is an approximation of the 2-norm,
    which is expensive to compute and not available here. The Frobenius norm
    lacks some mathematical properties one might expect from a norm.
"""
def __init__(self, *args, **kwargs):
self.__data = {}
        # LU decomposition cache, this is useful when solving the same system
# multiple times, when calculating the inverse and when calculating the
# determinant
self._LU = None
convert = kwargs.get('force_type', self.ctx.convert)
if isinstance(args[0], (list, tuple)):
if isinstance(args[0][0], (list, tuple)):
# interpret nested list as matrix
A = args[0]
self.__rows = len(A)
self.__cols = len(A[0])
for i, row in enumerate(A):
for j, a in enumerate(row):
self[i, j] = convert(a)
else:
# interpret list as row vector
v = args[0]
self.__rows = len(v)
self.__cols = 1
for i, e in enumerate(v):
self[i, 0] = e
elif isinstance(args[0], int):
# create empty matrix of given dimensions
if len(args) == 1:
self.__rows = self.__cols = args[0]
else:
assert isinstance(args[1], int), 'expected int'
self.__rows = args[0]
self.__cols = args[1]
elif isinstance(args[0], _matrix):
A = args[0].copy()
self.__data = A._matrix__data
self.__rows = A._matrix__rows
self.__cols = A._matrix__cols
convert = kwargs.get('force_type', self.ctx.convert)
for i in xrange(A.__rows):
for j in xrange(A.__cols):
A[i,j] = convert(A[i,j])
elif hasattr(args[0], 'tolist'):
A = self.ctx.matrix(args[0].tolist())
self.__data = A._matrix__data
self.__rows = A._matrix__rows
self.__cols = A._matrix__cols
else:
raise TypeError('could not interpret given arguments')
def apply(self, f):
"""
Return a copy of self with the function `f` applied elementwise.
"""
new = self.ctx.matrix(self.__rows, self.__cols)
for i in xrange(self.__rows):
for j in xrange(self.__cols):
new[i,j] = f(self[i,j])
return new
def __nstr__(self, n=None, **kwargs):
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
if n:
string = self.ctx.nstr(self[i,j], n, **kwargs)
else:
string = str(self[i,j])
res[-1].append(string)
maxlen[j] = max(len(string), maxlen[j])
# Patch strings together
for i, row in enumerate(res):
for j, elem in enumerate(row):
# Pad each element up to maxlen so the columns line up
row[j] = elem.rjust(maxlen[j])
res[i] = "[" + colsep.join(row) + "]"
return rowsep.join(res)
def __str__(self):
return self.__nstr__()
def _toliststr(self, avoid_type=False):
"""
Create a list string from a matrix.
If avoid_type: avoid multiple 'mpf's.
"""
# XXX: should be something like self.ctx._types
typ = self.ctx.mpf
s = '['
for i in xrange(self.__rows):
s += '['
for j in xrange(self.__cols):
if not avoid_type or not isinstance(self[i,j], typ):
a = repr(self[i,j])
else:
a = "'" + str(self[i,j]) + "'"
s += a + ', '
s = s[:-2]
s += '],\n '
s = s[:-3]
s += ']'
return s
def tolist(self):
"""
Convert the matrix to a nested list.
"""
return [[self[i,j] for j in range(self.__cols)] for i in range(self.__rows)]
def __repr__(self):
if self.ctx.pretty:
return self.__str__()
s = 'matrix(\n'
s += self._toliststr(avoid_type=True) + ')'
return s
def __get_element(self, key):
'''
Fast extraction of the i,j element from the matrix
        This function is for private use only because it is unsafe:
        1. Does not check the value of key; it expects key to be an integer tuple (i,j)
2. Does not check bounds
'''
if key in self.__data:
return self.__data[key]
else:
return self.ctx.zero
def __set_element(self, key, value):
'''
Fast assignment of the i,j element in the matrix
This function is unsafe:
        1. Does not check the value of key; it expects key to be an integer tuple (i,j)
2. Does not check bounds
3. Does not check the value type
'''
if value: # only store non-zeros
self.__data[key] = value
elif key in self.__data:
del self.__data[key]
def __getitem__(self, key):
        '''
        Getitem function for the mp matrix class, with slice indexing enabled.
        It allows extracting a submatrix from the matrix via slices:
        B = A[:,2:6]
        '''
# Convert vector to matrix indexing
if isinstance(key, int) or isinstance(key,slice):
            # only sufficient for vectors
if self.__rows == 1:
key = (0, key)
elif self.__cols == 1:
key = (key, 0)
else:
raise IndexError('insufficient indices for matrix')
if isinstance(key[0],slice) or isinstance(key[1],slice):
#Rows
if isinstance(key[0],slice):
#Check bounds
if (key[0].start is None or key[0].start >= 0) and \
(key[0].stop is None or key[0].stop <= self.__rows+1):
# Generate indices
rows = xrange(*key[0].indices(self.__rows))
else:
raise IndexError('Row index out of bounds')
else:
# Single row
rows = [key[0]]
# Columns
if isinstance(key[1],slice):
# Check bounds
if (key[1].start is None or key[1].start >= 0) and \
(key[1].stop is None or key[1].stop <= self.__cols+1):
# Generate indices
columns = xrange(*key[1].indices(self.__cols))
else:
raise IndexError('Column index out of bounds')
else:
# Single column
columns = [key[1]]
# Create matrix slice
m = self.ctx.matrix(len(rows),len(columns))
# Assign elements to the output matrix
for i,x in enumerate(rows):
for j,y in enumerate(columns):
m.__set_element((i,j),self.__get_element((x,y)))
return m
else:
# single element extraction
if key[0] >= self.__rows or key[1] >= self.__cols:
raise IndexError('matrix index out of range')
if key in self.__data:
return self.__data[key]
else:
return self.ctx.zero
def __setitem__(self, key, value):
        # setitem function for the mp matrix class, with slice indexing enabled
        # it allows the following assignments:
        # a scalar to a slice of the matrix
        #     A[:,2:6] = 2.5
        # a submatrix to a slice (the value matrix must match the slice size)
        #     A[3,:] = B where A is n x m and B is 1 x m
# Convert vector to matrix indexing
if isinstance(key, int) or isinstance(key,slice):
            # only sufficient for vectors
if self.__rows == 1:
key = (0, key)
elif self.__cols == 1:
key = (key, 0)
else:
raise IndexError('insufficient indices for matrix')
# Slice indexing
if isinstance(key[0],slice) or isinstance(key[1],slice):
# Rows
if isinstance(key[0],slice):
# Check bounds
if (key[0].start is None or key[0].start >= 0) and \
(key[0].stop is None or key[0].stop <= self.__rows+1):
# generate row indices
rows = xrange(*key[0].indices(self.__rows))
else:
raise IndexError('Row index out of bounds')
else:
# Single row
rows = [key[0]]
# Columns
if isinstance(key[1],slice):
# Check bounds
if (key[1].start is None or key[1].start >= 0) and \
(key[1].stop is None or key[1].stop <= self.__cols+1):
# Generate column indices
columns = xrange(*key[1].indices(self.__cols))
else:
raise IndexError('Column index out of bounds')
else:
# Single column
columns = [key[1]]
            # Assign to the slice, either from a matrix or from a scalar
if isinstance(value,self.ctx.matrix):
# Assign elements to matrix if input and output dimensions match
if len(rows) == value.rows and len(columns) == value.cols:
for i,x in enumerate(rows):
for j,y in enumerate(columns):
self.__set_element((x,y), value.__get_element((i,j)))
else:
raise ValueError('Dimensions do not match')
else:
# Assign slice with scalars
value = self.ctx.convert(value)
for i in rows:
for j in columns:
self.__set_element((i,j), value)
else:
            # Single element assignment
# Check bounds
if key[0] >= self.__rows or key[1] >= self.__cols:
raise IndexError('matrix index out of range')
# Convert and store value
value = self.ctx.convert(value)
if value: # only store non-zeros
self.__data[key] = value
elif key in self.__data:
del self.__data[key]
if self._LU:
self._LU = None
return
def __iter__(self):
for i in xrange(self.__rows):
for j in xrange(self.__cols):
yield self[i,j]
def __mul__(self, other):
if isinstance(other, self.ctx.matrix):
# dot multiplication TODO: use Strassen's method?
if self.__cols != other.__rows:
raise ValueError('dimensions not compatible for multiplication')
new = self.ctx.matrix(self.__rows, other.__cols)
for i in xrange(self.__rows):
for j in xrange(other.__cols):
new[i, j] = self.ctx.fdot((self[i,k], other[k,j])
for k in xrange(other.__rows))
return new
else:
# try scalar multiplication
new = self.ctx.matrix(self.__rows, self.__cols)
for i in xrange(self.__rows):
for j in xrange(self.__cols):
new[i, j] = other * self[i, j]
return new
def __rmul__(self, other):
# assume other is scalar and thus commutative
assert not isinstance(other, self.ctx.matrix)
return self.__mul__(other)
def __pow__(self, other):
# avoid cyclic import problems
#from linalg import inverse
if not isinstance(other, int):
raise ValueError('only integer exponents are supported')
if not self.__rows == self.__cols:
raise ValueError('only powers of square matrices are defined')
n = other
if n == 0:
return self.ctx.eye(self.__rows)
if n < 0:
n = -n
neg = True
else:
neg = False
i = n
y = 1
z = self.copy()
while i != 0:
if i % 2 == 1:
y = y * z
z = z*z
i = i // 2
if neg:
y = self.ctx.inverse(y)
return y
def __div__(self, other):
        # assume other is scalar and do element-wise division
assert not isinstance(other, self.ctx.matrix)
new = self.ctx.matrix(self.__rows, self.__cols)
for i in xrange(self.__rows):
for j in xrange(self.__cols):
new[i,j] = self[i,j] / other
return new
__truediv__ = __div__
def __add__(self, other):
if isinstance(other, self.ctx.matrix):
if not (self.__rows == other.__rows and self.__cols == other.__cols):
raise ValueError('incompatible dimensions for addition')
new = self.ctx.matrix(self.__rows, self.__cols)
for i in xrange(self.__rows):
for j in xrange(self.__cols):
new[i,j] = self[i,j] + other[i,j]
return new
else:
# assume other is scalar and add element-wise
new = self.ctx.matrix(self.__rows, self.__cols)
for i in xrange(self.__rows):
for j in xrange(self.__cols):
                    new[i,j] = self[i,j] + other
return new
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, self.ctx.matrix) and not (self.__rows == other.__rows
and self.__cols == other.__cols):
            raise ValueError('incompatible dimensions for subtraction')
return self.__add__(other * (-1))
def __neg__(self):
return (-1) * self
def __rsub__(self, other):
return -self + other
def __eq__(self, other):
return self.__rows == other.__rows and self.__cols == other.__cols \
and self.__data == other.__data
def __len__(self):
if self.rows == 1:
return self.cols
elif self.cols == 1:
return self.rows
else:
return self.rows # do it like numpy
def __getrows(self):
return self.__rows
def __setrows(self, value):
for key in self.__data.copy():
if key[0] >= value:
del self.__data[key]
self.__rows = value
rows = property(__getrows, __setrows, doc='number of rows')
def __getcols(self):
return self.__cols
def __setcols(self, value):
for key in self.__data.copy():
if key[1] >= value:
del self.__data[key]
self.__cols = value
cols = property(__getcols, __setcols, doc='number of columns')
def transpose(self):
new = self.ctx.matrix(self.__cols, self.__rows)
for i in xrange(self.__rows):
for j in xrange(self.__cols):
new[j,i] = self[i,j]
return new
T = property(transpose)
def conjugate(self):
return self.apply(self.ctx.conj)
def transpose_conj(self):
return self.conjugate().transpose()
H = property(transpose_conj)
def copy(self):
new = self.ctx.matrix(self.__rows, self.__cols)
new.__data = self.__data.copy()
return new
__copy__ = copy
def column(self, n):
m = self.ctx.matrix(self.rows, 1)
for i in range(self.rows):
m[i] = self[i,n]
return m
class MatrixMethods(object):
def __init__(ctx):
# XXX: subclass
ctx.matrix = type('matrix', (_matrix,), {})
ctx.matrix.ctx = ctx
ctx.matrix.convert = ctx.convert
def eye(ctx, n, **kwargs):
"""
Create square identity matrix n x n.
"""
A = ctx.matrix(n, **kwargs)
for i in xrange(n):
A[i,i] = 1
return A
def diag(ctx, diagonal, **kwargs):
"""
Create square diagonal matrix using given list.
Example:
>>> from mpmath import diag, mp
>>> mp.pretty = False
>>> diag([1, 2, 3])
matrix(
[['1.0', '0.0', '0.0'],
['0.0', '2.0', '0.0'],
['0.0', '0.0', '3.0']])
"""
A = ctx.matrix(len(diagonal), **kwargs)
for i in xrange(len(diagonal)):
A[i,i] = diagonal[i]
return A
def zeros(ctx, *args, **kwargs):
"""
Create matrix m x n filled with zeros.
One given dimension will create square matrix n x n.
Example:
>>> from mpmath import zeros, mp
>>> mp.pretty = False
>>> zeros(2)
matrix(
[['0.0', '0.0'],
['0.0', '0.0']])
"""
if len(args) == 1:
m = n = args[0]
elif len(args) == 2:
m = args[0]
n = args[1]
else:
raise TypeError('zeros expected at most 2 arguments, got %i' % len(args))
A = ctx.matrix(m, n, **kwargs)
for i in xrange(m):
for j in xrange(n):
A[i,j] = 0
return A
def ones(ctx, *args, **kwargs):
"""
Create matrix m x n filled with ones.
One given dimension will create square matrix n x n.
Example:
>>> from mpmath import ones, mp
>>> mp.pretty = False
>>> ones(2)
matrix(
[['1.0', '1.0'],
['1.0', '1.0']])
"""
if len(args) == 1:
m = n = args[0]
elif len(args) == 2:
m = args[0]
n = args[1]
else:
raise TypeError('ones expected at most 2 arguments, got %i' % len(args))
A = ctx.matrix(m, n, **kwargs)
for i in xrange(m):
for j in xrange(n):
A[i,j] = 1
return A
def hilbert(ctx, m, n=None):
"""
        Create an m x n (pseudo-)Hilbert matrix.
        A single given dimension will create an n x n Hilbert matrix.
        The matrix is very ill-conditioned and, if square, symmetric
        positive definite.
"""
if n is None:
n = m
A = ctx.matrix(m, n)
for i in xrange(m):
for j in xrange(n):
A[i,j] = ctx.one / (i + j + 1)
return A
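    # Hedged usage sketch for hilbert; entries are 1/(i + j + 1):
    #
    #     >>> from mpmath import mp, hilbert
    #     >>> mp.dps = 15; mp.pretty = False
    #     >>> hilbert(2)
    #     matrix(
    #     [['1.0', '0.5'],
    #      ['0.5', '0.333333333333333']])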
def randmatrix(ctx, m, n=None, min=0, max=1, **kwargs):
"""
Create a random m x n matrix.
        All values are >= min and < max.
n defaults to m.
Example:
>>> from mpmath import randmatrix
>>> randmatrix(2) # doctest:+SKIP
matrix(
[['0.53491598236191806', '0.57195669543302752'],
['0.85589992269513615', '0.82444367501382143']])
"""
if not n:
n = m
A = ctx.matrix(m, n, **kwargs)
for i in xrange(m):
for j in xrange(n):
A[i,j] = ctx.rand() * (max - min) + min
return A
def swap_row(ctx, A, i, j):
"""
Swap row i with row j.
"""
if i == j:
return
if isinstance(A, ctx.matrix):
for k in xrange(A.cols):
A[i,k], A[j,k] = A[j,k], A[i,k]
elif isinstance(A, list):
A[i], A[j] = A[j], A[i]
else:
raise TypeError('could not interpret type')
def extend(ctx, A, b):
"""
Extend matrix A with column b and return result.
"""
assert isinstance(A, ctx.matrix)
assert A.rows == len(b)
A = A.copy()
A.cols += 1
for i in xrange(A.rows):
A[i, A.cols-1] = b[i]
return A
def norm(ctx, x, p=2):
r"""
Gives the entrywise `p`-norm of an iterable *x*, i.e. the vector norm
`\left(\sum_k |x_k|^p\right)^{1/p}`, for any given `1 \le p \le \infty`.
Special cases:
If *x* is not iterable, this just returns ``absmax(x)``.
``p=1`` gives the sum of absolute values.
``p=2`` is the standard Euclidean vector norm.
``p=inf`` gives the magnitude of the largest element.
For *x* a matrix, ``p=2`` is the Frobenius norm.
For operator matrix norms, use :func:`~mpmath.mnorm` instead.
You can use the string 'inf' as well as float('inf') or mpf('inf')
to specify the infinity norm.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> x = matrix([-10, 2, 100])
>>> norm(x, 1)
mpf('112.0')
>>> norm(x, 2)
mpf('100.5186549850325')
>>> norm(x, inf)
mpf('100.0')
"""
try:
iter(x)
except TypeError:
return ctx.absmax(x)
if type(p) is not int:
p = ctx.convert(p)
if p == ctx.inf:
return max(ctx.absmax(i) for i in x)
elif p == 1:
return ctx.fsum(x, absolute=1)
elif p == 2:
return ctx.sqrt(ctx.fsum(x, absolute=1, squared=1))
elif p > 1:
return ctx.nthroot(ctx.fsum(abs(i)**p for i in x), p)
else:
raise ValueError('p has to be >= 1')
def mnorm(ctx, A, p=1):
r"""
Gives the matrix (operator) `p`-norm of A. Currently ``p=1`` and ``p=inf``
are supported:
``p=1`` gives the 1-norm (maximal column sum)
``p=inf`` gives the `\infty`-norm (maximal row sum).
You can use the string 'inf' as well as float('inf') or mpf('inf')
``p=2`` (not implemented) for a square matrix is the usual spectral
matrix norm, i.e. the largest singular value.
        ``p='f'`` (or 'F', 'fro', 'frobenius') gives the
Frobenius norm, which is the elementwise 2-norm. The Frobenius norm is an
approximation of the spectral norm and satisfies
.. math ::
\frac{1}{\sqrt{\mathrm{rank}(A)}} \|A\|_F \le \|A\|_2 \le \|A\|_F
The Frobenius norm lacks some mathematical properties that might
be expected of a norm.
For general elementwise `p`-norms, use :func:`~mpmath.norm` instead.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> A = matrix([[1, -1000], [100, 50]])
>>> mnorm(A, 1)
mpf('1050.0')
>>> mnorm(A, inf)
mpf('1001.0')
>>> mnorm(A, 'F')
mpf('1006.2310867787777')
"""
A = ctx.matrix(A)
if type(p) is not int:
if type(p) is str and 'frobenius'.startswith(p.lower()):
return ctx.norm(A, 2)
p = ctx.convert(p)
m, n = A.rows, A.cols
if p == 1:
return max(ctx.fsum((A[i,j] for i in xrange(m)), absolute=1) for j in xrange(n))
elif p == ctx.inf:
return max(ctx.fsum((A[i,j] for j in xrange(n)), absolute=1) for i in xrange(m))
else:
raise NotImplementedError("matrix p-norm for arbitrary p")
if __name__ == '__main__':
import doctest
doctest.testmod()
# ===== end of file: altanalyze/stats_scripts/mpmath/matrices/matrices.py =====
from copy import copy
from ..libmp.backend import xrange
class LinearAlgebraMethods(object):
def LU_decomp(ctx, A, overwrite=False, use_cache=True):
"""
LU-factorization of a n*n matrix using the Gauss algorithm.
Returns L and U in one matrix and the pivot indices.
Use overwrite to specify whether A will be overwritten with L and U.
"""
if not A.rows == A.cols:
raise ValueError('need n*n matrix')
# get from cache if possible
if use_cache and isinstance(A, ctx.matrix) and A._LU:
return A._LU
if not overwrite:
orig = A
A = A.copy()
tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger
n = A.rows
p = [None]*(n - 1)
for j in xrange(n - 1):
# pivoting, choose max(abs(reciprocal row sum)*abs(pivot element))
biggest = 0
for k in xrange(j, n):
s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)])
if ctx.absmin(s) <= tol:
raise ZeroDivisionError('matrix is numerically singular')
current = 1/s * ctx.absmin(A[k,j])
if current > biggest: # TODO: what if equal?
biggest = current
p[j] = k
# swap rows according to p
ctx.swap_row(A, j, p[j])
if ctx.absmin(A[j,j]) <= tol:
raise ZeroDivisionError('matrix is numerically singular')
# calculate elimination factors and add rows
for i in xrange(j + 1, n):
A[i,j] /= A[j,j]
for k in xrange(j + 1, n):
A[i,k] -= A[i,j]*A[j,k]
if ctx.absmin(A[n - 1,n - 1]) <= tol:
raise ZeroDivisionError('matrix is numerically singular')
# cache decomposition
if not overwrite and isinstance(orig, ctx.matrix):
orig._LU = (A, p)
return A, p
def L_solve(ctx, L, b, p=None):
"""
Solve the lower part of a LU factorized matrix for y.
"""
assert L.rows == L.cols, 'need n*n matrix'
n = L.rows
assert len(b) == n
b = copy(b)
if p: # swap b according to p
for k in xrange(0, len(p)):
ctx.swap_row(b, k, p[k])
# solve
for i in xrange(1, n):
for j in xrange(i):
b[i] -= L[i,j] * b[j]
return b
def U_solve(ctx, U, y):
"""
Solve the upper part of a LU factorized matrix for x.
"""
assert U.rows == U.cols, 'need n*n matrix'
n = U.rows
assert len(y) == n
x = copy(y)
for i in xrange(n - 1, -1, -1):
for j in xrange(i + 1, n):
x[i] -= U[i,j] * x[j]
x[i] /= U[i,i]
return x
def lu_solve(ctx, A, b, **kwargs):
"""
Ax = b => x
Solve a determined or overdetermined linear equations system.
Fast LU decomposition is used, which is less accurate than QR decomposition
(especially for overdetermined systems), but it's twice as efficient.
Use qr_solve if you want more precision or have to solve a very ill-
conditioned system.
If you specify real=True, it does not check for overdetermined complex
systems.
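A minimal example (illustrative)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> A = matrix([[1, 2], [3, 4]])
>>> b = matrix([5, 6])
>>> lu_solve(A, b)
[-4.0]
[ 4.5]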
"""
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A nor b
A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
if A.rows < A.cols:
raise ValueError('cannot solve underdetermined system')
if A.rows > A.cols:
# use least-squares method if overdetermined
# (this increases errors)
AH = A.H
A = AH * A
b = AH * b
if (kwargs.get('real', False) or
not sum(type(i) is ctx.mpc for i in A)):
# TODO: necessary to check also b?
x = ctx.cholesky_solve(A, b)
else:
x = ctx.lu_solve(A, b)
else:
# LU factorization
A, p = ctx.LU_decomp(A)
b = ctx.L_solve(A, b, p)
x = ctx.U_solve(A, b)
finally:
ctx.prec = prec
return x
def improve_solution(ctx, A, x, b, maxsteps=1):
"""
Improve a solution to a linear equation system iteratively.
This re-uses the LU decomposition and is thus cheap.
Usually 3 to 4 iterations give the maximal improvement.
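Sketch of typical use (illustrative; here the LU-based solution is already
accurate to working precision, so the loop stops at the residual check)::
>>> from mpmath import *
>>> mp.dps = 15
>>> A = matrix([[1, 2], [3, 4]])
>>> b = matrix([5, 6])
>>> x = lu_solve(A, b)
>>> x = mp.improve_solution(A, x, b)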
"""
assert A.rows == A.cols, 'need n*n matrix' # TODO: really?
for _ in xrange(maxsteps):
r = ctx.residual(A, x, b)
if ctx.norm(r, 2) < 10*ctx.eps:
break
# this uses cached LU decomposition and is thus cheap
dx = ctx.lu_solve(A, -r)
x += dx
return x
def lu(ctx, A):
"""
A -> P, L, U
LU factorisation of a square matrix A. L is the lower, U the upper part.
P is the permutation matrix indicating the row swaps.
P*A = L*U
If you need efficiency, use the low-level method LU_decomp instead; it is
much more memory-efficient.
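Example (illustrative)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> A = matrix([[2, 1], [4, 3]])
>>> P, L, U = lu(A)
>>> chop(P*A - L*U)
[0.0 0.0]
[0.0 0.0]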
"""
# get factorization
A, p = ctx.LU_decomp(A)
n = A.rows
L = ctx.matrix(n)
U = ctx.matrix(n)
for i in xrange(n):
for j in xrange(n):
if i > j:
L[i,j] = A[i,j]
elif i == j:
L[i,j] = 1
U[i,j] = A[i,j]
else:
U[i,j] = A[i,j]
# calculate permutation matrix
P = ctx.eye(n)
for k in xrange(len(p)):
ctx.swap_row(P, k, p[k])
return P, L, U
def unitvector(ctx, n, i):
"""
Return the i-th n-dimensional unit vector.
"""
assert 0 < i <= n, 'this unit vector does not exist'
return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i)
def inverse(ctx, A, **kwargs):
"""
Calculate the inverse of a matrix.
If you want to solve an equation system Ax = b, it's recommended to use
solve(A, b) instead; it is about 3 times more efficient.
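Example (illustrative)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> A = matrix([[1, 2], [3, 4]])
>>> chop(inverse(A) * A)
[1.0 0.0]
[0.0 1.0]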
"""
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A
A = ctx.matrix(A, **kwargs).copy()
n = A.rows
# get LU factorisation
A, p = ctx.LU_decomp(A)
cols = []
# calculate unit vectors and solve corresponding system to get columns
for i in xrange(1, n + 1):
e = ctx.unitvector(n, i)
y = ctx.L_solve(A, e, p)
cols.append(ctx.U_solve(A, y))
# convert columns to matrix
inv = []
for i in xrange(n):
row = []
for j in xrange(n):
row.append(cols[j][i])
inv.append(row)
result = ctx.matrix(inv, **kwargs)
finally:
ctx.prec = prec
return result
def householder(ctx, A):
"""
(A|b) -> H, p, x, res
(A|b) is the coefficient matrix augmented with the right-hand side of an
optionally overdetermined linear equation system.
H and p contain all information about the transformation matrices.
x is the solution, res the residual.
"""
assert isinstance(A, ctx.matrix)
m = A.rows
n = A.cols
assert m >= n - 1
# calculate Householder matrix
p = []
for j in xrange(0, n - 1):
s = ctx.fsum((A[i,j])**2 for i in xrange(j, m))
if not abs(s) > ctx.eps:
raise ValueError('matrix is numerically singular')
p.append(-ctx.sign(A[j,j]) * ctx.sqrt(s))
kappa = ctx.one / (s - p[j] * A[j,j])
A[j,j] -= p[j]
for k in xrange(j+1, n):
y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j, m)) * kappa
for i in xrange(j, m):
A[i,k] -= A[i,j] * y
# solve Rx = c1
x = [A[i,n - 1] for i in xrange(n - 1)]
for i in xrange(n - 2, -1, -1):
x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1))
x[i] /= p[i]
# calculate residual
if not m == n - 1:
r = [A[m-1-i, n-1] for i in xrange(m - n + 1)]
else:
# determined system, residual should be 0
r = [0]*m # the residual of a determined system is identically zero
return A, p, x, r
#def qr(ctx, A):
# """
# A -> Q, R
#
# QR factorisation of a square matrix A using Householder decomposition.
# Q is orthogonal, this leads to very few numerical errors.
#
# A = Q*R
# """
# H, p, x, res = householder(A)
# TODO: implement this
def residual(ctx, A, x, b, **kwargs):
"""
Calculate the residual of a solution to a linear equation system.
r = A*x - b for A*x = b
"""
oldprec = ctx.prec
try:
ctx.prec *= 2
A, x, b = ctx.matrix(A, **kwargs), ctx.matrix(x, **kwargs), ctx.matrix(b, **kwargs)
return A*x - b
finally:
ctx.prec = oldprec
def qr_solve(ctx, A, b, norm=None, **kwargs):
"""
Ax = b => x, ||Ax - b||
Solve a determined or overdetermined linear equations system and
calculate the norm of the residual (error).
QR decomposition using Householder factorization is applied, which gives very
accurate results even for ill-conditioned matrices, though at roughly twice
the cost of lu_solve.
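Example (illustrative; this overdetermined system happens to be consistent,
so the residual is negligible)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> A = matrix([[1, 2], [3, 4], [5, 6]])
>>> b = matrix([5, 6, 7])
>>> x, res = qr_solve(A, b)
>>> x
[-4.0]
[ 4.5]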
"""
if norm is None:
norm = ctx.norm
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A nor b
A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
if A.rows < A.cols:
raise ValueError('cannot solve underdetermined system')
H, p, x, r = ctx.householder(ctx.extend(A, b))
res = ctx.norm(r)
# calculate residual "manually" for determined systems
if res == 0:
res = ctx.norm(ctx.residual(A, x, b))
return ctx.matrix(x, **kwargs), res
finally:
ctx.prec = prec
def cholesky(ctx, A, tol=None):
r"""
Cholesky decomposition of a symmetric positive-definite matrix `A`.
Returns a lower triangular matrix `L` such that `A = L \times L^T`.
More generally, for a complex Hermitian positive-definite matrix,
a Cholesky decomposition satisfying `A = L \times L^H` is returned.
The Cholesky decomposition can be used to solve linear equation
systems twice as efficiently as LU decomposition, or to
test whether `A` is positive-definite.
The optional parameter ``tol`` determines the tolerance for
verifying positive-definiteness.
**Examples**
Cholesky decomposition of a positive-definite symmetric matrix::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> A = eye(3) + hilbert(3)
>>> nprint(A)
[ 2.0 0.5 0.333333]
[ 0.5 1.33333 0.25]
[0.333333 0.25 1.2]
>>> L = cholesky(A)
>>> nprint(L)
[ 1.41421 0.0 0.0]
[0.353553 1.09924 0.0]
[0.235702 0.15162 1.05899]
>>> chop(A - L*L.T)
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
Cholesky decomposition of a Hermitian matrix::
>>> A = eye(3) + matrix([[0,0.25j,-0.5j],[-0.25j,0,0],[0.5j,0,0]])
>>> L = cholesky(A)
>>> nprint(L)
[ 1.0 0.0 0.0]
[(0.0 - 0.25j) (0.968246 + 0.0j) 0.0]
[ (0.0 + 0.5j) (0.129099 + 0.0j) (0.856349 + 0.0j)]
>>> chop(A - L*L.H)
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
Attempted Cholesky decomposition of a matrix that is not positive
definite::
>>> A = -eye(3) + hilbert(3)
>>> L = cholesky(A)
Traceback (most recent call last):
...
ValueError: matrix is not positive-definite
**References**
1. [Wikipedia]_ http://en.wikipedia.org/wiki/Cholesky_decomposition
"""
assert isinstance(A, ctx.matrix)
if not A.rows == A.cols:
raise ValueError('need n*n matrix')
if tol is None:
tol = +ctx.eps
n = A.rows
L = ctx.matrix(n)
for j in xrange(n):
c = ctx.re(A[j,j])
if abs(c-A[j,j]) > tol:
raise ValueError('matrix is not Hermitian')
s = c - ctx.fsum((L[j,k] for k in xrange(j)),
absolute=True, squared=True)
if s < tol:
raise ValueError('matrix is not positive-definite')
L[j,j] = ctx.sqrt(s)
for i in xrange(j, n):
it1 = (L[i,k] for k in xrange(j))
it2 = (L[j,k] for k in xrange(j))
t = ctx.fdot(it1, it2, conjugate=True)
L[i,j] = (A[i,j] - t) / L[j,j]
return L
def cholesky_solve(ctx, A, b, **kwargs):
"""
Ax = b => x
Solve a symmetric positive-definite linear equation system.
This is twice as efficient as lu_solve.
Typical use cases:
* A.T*A
* Hessian matrix
* differential equations
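Example (illustrative)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> A = matrix([[2, 1], [1, 2]])
>>> b = matrix([3, 3])
>>> cholesky_solve(A, b)
[1.0]
[1.0]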
"""
prec = ctx.prec
try:
ctx.prec += 10
# do not overwrite A nor b
A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
if A.rows != A.cols:
raise ValueError('can only solve determined system')
# Cholesky factorization
L = ctx.cholesky(A)
# solve
n = L.rows
assert len(b) == n
for i in xrange(n):
b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i))
b[i] /= L[i,i]
x = ctx.U_solve(L.T, b)
return x
finally:
ctx.prec = prec
def det(ctx, A):
"""
Calculate the determinant of a matrix.
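Example (illustrative)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> det(matrix([[1, 2], [3, 4]]))
-2.0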
"""
prec = ctx.prec
try:
# do not overwrite A
A = ctx.matrix(A).copy()
# use LU factorization to calculate determinant
try:
R, p = ctx.LU_decomp(A)
except ZeroDivisionError:
return 0
z = 1
for i, e in enumerate(p):
if i != e:
z *= -1
for i in xrange(A.rows):
z *= R[i,i]
return z
finally:
ctx.prec = prec
def cond(ctx, A, norm=None):
"""
Calculate the condition number of a matrix using a specified matrix norm.
The condition number estimates the sensitivity of a matrix to errors.
Example: small input errors for ill-conditioned coefficient matrices
alter the solution of the system dramatically.
For ill-conditioned matrices it's recommended to use qr_solve() instead
of lu_solve(). This does not help with input errors, however; it just
avoids adding additional errors.
Definition: cond(A) = ||A|| * ||A**-1||
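Example (illustrative; with the default 1-norm)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> cond(matrix([[1, 2], [3, 4]]))
21.0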
"""
if norm is None:
norm = lambda x: ctx.mnorm(x,1)
return norm(A) * norm(ctx.inverse(A))
def lu_solve_mat(ctx, a, b):
"""Solve a * x = b where a and b are matrices."""
r = ctx.matrix(a.rows, b.cols)
for i in range(b.cols):
c = ctx.lu_solve(a, b.column(i))
for j in range(len(c)):
r[j, i] = c[j]
return r
# ===== end of AltAnalyze-2.1.3.15 file: altanalyze/stats_scripts/mpmath/matrices/linalg.py =====
from bisect import bisect
from ..libmp.backend import xrange
class ODEMethods(object):
pass
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
h = tol = ctx.ldexp(1, -tol_prec)
dim = len(y0)
xs = [x0]
ys = [y0]
x = x0
y = y0
orig = ctx.prec
try:
ctx.prec = orig*(1+n)
# Use n steps with Euler's method to get
# evaluation points for derivatives
for i in range(n):
fxy = derivs(x, y)
y = [y[i]+h*fxy[i] for i in xrange(len(y))]
x += h
xs.append(x)
ys.append(y)
# Compute derivatives
ser = [[] for d in range(dim)]
for j in range(n+1):
s = [0]*dim
b = (-1) ** (j & 1)
k = 1
for i in range(j+1):
for d in range(dim):
s[d] += b * ys[i][d]
b = (b * (j-k+1)) // (-k)
k += 1
scale = h**(-j) / ctx.fac(j)
for d in range(dim):
s[d] = s[d] * scale
ser[d].append(s[d])
finally:
ctx.prec = orig
# Estimate radius for which we can get full accuracy.
# XXX: do this right for zeros
radius = ctx.one
for ts in ser:
if ts[-1]:
radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
radius /= 2 # XXX
return ser, x0+radius
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
r"""
Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
that is a numerical solution of the `n+1`-dimensional first-order
ordinary differential equation (ODE) system
.. math ::
y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])
y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])
\vdots
y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])
The derivatives are specified by the vector-valued function
*F* that evaluates
`[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
The initial point `x_0` is specified by the scalar argument *x0*,
and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
specified by the vector argument *y0*.
For convenience, if the system is one-dimensional, you may optionally
provide just a scalar value for *y0*. In this case, *F* should accept
a scalar *y* argument and return a scalar. The solution function
*y* will return scalar values instead of length-1 vectors.
Evaluation of the solution function `y(x)` is permitted
for any `x \ge x_0`.
A high-order ODE can be solved by transforming it into first-order
vector form. This transformation is described in standard texts
on ODEs. Examples will also be given below.
**Options, speed and accuracy**
By default, :func:`~mpmath.odefun` uses a high-order Taylor series
method. For reasonably well-behaved problems, the solution will
be fully accurate to within the working precision. Note that
it must be possible to evaluate *F* to very high precision
for the generation of Taylor series to work.
To get a faster but less accurate solution, you can set a large
value for *tol* (which defaults roughly to *eps*). If you just
want to plot the solution or perform a basic simulation,
*tol = 0.01* is likely sufficient.
The *degree* argument controls the degree of the solver (with
*method='taylor'*, this is the degree of the Taylor series
expansion). A higher degree means that a longer step can be taken
before a new local solution must be generated from *F*,
meaning that fewer steps are required to get from `x_0` to a given
`x_1`. On the other hand, a higher degree also means that each
local solution becomes more expensive (i.e., more evaluations of
*F* are required per step, and at higher precision).
The optimal setting therefore involves a tradeoff. Generally,
decreasing the *degree* for Taylor series is likely to give a faster
solution at low precision, while increasing it is likely to be better
at higher precision.
The function
object returned by :func:`~mpmath.odefun` caches the solutions at all step
points and uses polynomial interpolation between step points.
Therefore, once `y(x_1)` has been evaluated for some `x_1`,
`y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`,
and continuing the evaluation up to `x_2 > x_1` is also fast.
**Examples of first-order ODEs**
We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
which has explicit solution `y(x) = \exp(x)`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = odefun(lambda x, y: y, 0, 1)
>>> for x in [0, 1, 2.5]:
... print((f(x), exp(x)))
...
(1.0, 1.0)
(2.71828182845905, 2.71828182845905)
(12.1824939607035, 12.1824939607035)
The solution with high precision::
>>> mp.dps = 50
>>> f = odefun(lambda x, y: y, 0, 1)
>>> f(1)
2.7182818284590452353602874713526624977572470937
>>> exp(1)
2.7182818284590452353602874713526624977572470937
Using the more general vectorized form, the test problem
can be input as (note that *f* returns a 1-element vector)::
>>> mp.dps = 15
>>> f = odefun(lambda x, y: [y[0]], 0, [1])
>>> f(1)
[2.71828182845905]
:func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
impossible (and at best difficult) to solve analytically. As
an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
for `y(0) = \pi/2`. An exact solution happens to be known
for this problem, and is given by
`y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::
>>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
>>> for x in [2, 5, 10]:
... print((f(x), 2*atan(exp(mpf(x)**2/2))))
...
(2.87255666284091, 2.87255666284091)
(3.14158520028345, 3.14158520028345)
(3.14159265358979, 3.14159265358979)
If `F` is independent of `y`, an ODE can be solved using direct
integration. We can therefore obtain a reference solution with
:func:`~mpmath.quad`::
>>> f = lambda x: (1+x**2)/(1+x**3)
>>> g = odefun(lambda x, y: f(x), pi, 0)
>>> g(2*pi)
0.72128263801696
>>> quad(f, [pi, 2*pi])
0.72128263801696
**Examples of second-order ODEs**
We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'`
whereby the original equation can be written as `y_1' = y_0`. Put
together, we get the first-order, two-dimensional vector ODE
.. math ::
\begin{cases}
y_0' = -y_1 \\
y_1' = y_0
\end{cases}
To get a well-defined IVP, we need two initial values. With
`y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
course be solved by `y(x) = y_0(x) = \cos(x)` and
`-y'(x) = y_1(x) = \sin(x)`. We check this::
>>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
>>> for x in [0, 1, 2.5, 10]:
... nprint(f(x), 15)
... nprint([cos(x), sin(x)], 15)
... print("---")
...
[1.0, 0.0]
[1.0, 0.0]
---
[0.54030230586814, 0.841470984807897]
[0.54030230586814, 0.841470984807897]
---
[-0.801143615546934, 0.598472144103957]
[-0.801143615546934, 0.598472144103957]
---
[-0.839071529076452, -0.54402111088937]
[-0.839071529076452, -0.54402111088937]
---
Note that we get both the sine and the cosine solutions
simultaneously.
**TODO**
* Better automatic choice of degree and step size
* Make determination of Taylor series convergence radius
more robust
* Allow solution for `x < x_0`
* Allow solution for complex `x`
* Test for difficult (ill-conditioned) problems
* Implement Runge-Kutta and other algorithms
"""
if tol:
tol_prec = int(-ctx.log(tol, 2))+10
else:
tol_prec = ctx.prec+10
degree = degree or (3 + int(3*ctx.dps/2.))
workprec = ctx.prec + 40
try:
len(y0)
return_vector = True
except TypeError:
F_ = F
F = lambda x, y: [F_(x, y[0])]
y0 = [y0]
return_vector = False
ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
series_boundaries = [x0, xb]
series_data = [(ser, x0, xb)]
# We will be working with vectors of Taylor series
def mpolyval(ser, a):
return [ctx.polyval(s[::-1], a) for s in ser]
# Find nearest expansion point; compute if necessary
def get_series(x):
if x < x0:
raise ValueError
n = bisect(series_boundaries, x)
if n < len(series_boundaries):
return series_data[n-1]
while 1:
ser, xa, xb = series_data[-1]
if verbose:
print("Computing Taylor series for [%f, %f]" % (xa, xb))
y = mpolyval(ser, xb-xa)
xa = xb
ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
series_boundaries.append(xb)
series_data.append((ser, xa, xb))
if x <= xb:
return series_data[-1]
# Evaluation function
def interpolant(x):
x = ctx.convert(x)
orig = ctx.prec
try:
ctx.prec = workprec
ser, xa, xb = get_series(x)
y = mpolyval(ser, x-xa)
finally:
ctx.prec = orig
if return_vector:
return [+yk for yk in y]
else:
return +y[0]
return interpolant
ODEMethods.odefun = odefun
if __name__ == "__main__":
import doctest
doctest.testmod()
# ===== end of AltAnalyze-2.1.3.15 file: altanalyze/stats_scripts/mpmath/calculus/odes.py =====
from copy import copy
from ..libmp.backend import xrange
class OptimizationMethods(object):
def __init__(ctx):
pass
##############
# 1D-SOLVERS #
##############
class Newton:
"""
1d-solver generating pairs of approximative root and error.
Needs starting points x0 close to the root.
Pro:
* converges fast
* sometimes more robust than secant with bad second starting point
Contra:
* converges slowly for multiple roots
* needs first derivative
* 2 function evaluations per iteration
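Illustrative use through ``findroot`` (solvers are normally not
instantiated directly)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> findroot(lambda x: x**2 - 2, 1.5, solver='newton')
1.4142135623731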
"""
maxsteps = 20
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if len(x0) == 1:
self.x0 = x0[0]
else:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.f = f
if not 'df' in kwargs:
def df(x):
return self.ctx.diff(f, x)
else:
df = kwargs['df']
self.df = df
def __iter__(self):
f = self.f
df = self.df
x0 = self.x0
while True:
x1 = x0 - f(x0) / df(x0)
error = abs(x1 - x0)
x0 = x1
yield (x1, error)
class Secant:
"""
1d-solver generating pairs of approximative root and error.
Needs starting points x0 and x1 close to the root.
x1 defaults to x0 + 0.25.
Pro:
* converges fast
Contra:
* converges slowly for multiple roots
"""
maxsteps = 30
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if len(x0) == 1:
self.x0 = x0[0]
self.x1 = self.x0 + 0.25
elif len(x0) == 2:
self.x0 = x0[0]
self.x1 = x0[1]
else:
raise ValueError('expected 1 or 2 starting points, got %i' % len(x0))
self.f = f
def __iter__(self):
f = self.f
x0 = self.x0
x1 = self.x1
f0 = f(x0)
while True:
f1 = f(x1)
l = x1 - x0
if not l:
break
s = (f1 - f0) / l
if not s:
break
x0, x1 = x1, x1 - f1/s
f0 = f1
yield x1, abs(l)
class MNewton:
"""
1d-solver generating pairs of approximative root and error.
Needs starting point x0 close to the root.
Uses modified Newton's method that converges fast regardless of the
multiplicity of the root.
Pro:
* converges fast for multiple roots
Contra:
* needs first and second derivative of f
* 3 function evaluations per iteration
"""
maxsteps = 20
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if not len(x0) == 1:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.x0 = x0[0]
self.f = f
if not 'df' in kwargs:
def df(x):
return self.ctx.diff(f, x)
else:
df = kwargs['df']
self.df = df
if not 'd2f' in kwargs:
def d2f(x):
return self.ctx.diff(df, x)
else:
d2f = kwargs['d2f']
self.d2f = d2f
def __iter__(self):
x = self.x0
f = self.f
df = self.df
d2f = self.d2f
while True:
prevx = x
fx = f(x)
if fx == 0:
break
dfx = df(x)
d2fx = d2f(x)
# x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
x -= fx / (dfx - fx * d2fx / dfx)
error = abs(x - prevx)
yield x, error
class Halley:
"""
1d-solver generating pairs of approximative root and error.
Needs a starting point x0 close to the root.
Uses Halley's method with cubic convergence rate.
Pro:
* converges even faster than Newton's method
* useful when computing with *many* digits
Contra:
* needs first and second derivative of f
* 3 function evaluations per iteration
* converges slowly for multiple roots
"""
maxsteps = 20
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if not len(x0) == 1:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.x0 = x0[0]
self.f = f
if not 'df' in kwargs:
def df(x):
return self.ctx.diff(f, x)
else:
df = kwargs['df']
self.df = df
if not 'd2f' in kwargs:
def d2f(x):
return self.ctx.diff(df, x)
else:
d2f = kwargs['d2f']
self.d2f = d2f
def __iter__(self):
x = self.x0
f = self.f
df = self.df
d2f = self.d2f
while True:
prevx = x
fx = f(x)
dfx = df(x)
d2fx = d2f(x)
x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
error = abs(x - prevx)
yield x, error
class Muller:
"""
1d-solver generating pairs of approximative root and error.
Needs starting points x0, x1 and x2 close to the root.
x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
Uses Muller's method that converges towards complex roots.
Pro:
* converges fast (somewhat faster than secant)
* can find complex roots
Contra:
* converges slowly for multiple roots
* may have complex values for real starting points and real roots
http://en.wikipedia.org/wiki/Muller's_method
"""
maxsteps = 30
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if len(x0) == 1:
self.x0 = x0[0]
self.x1 = self.x0 + 0.25
self.x2 = self.x1 + 0.25
elif len(x0) == 2:
self.x0 = x0[0]
self.x1 = x0[1]
self.x2 = self.x1 + 0.25
elif len(x0) == 3:
self.x0 = x0[0]
self.x1 = x0[1]
self.x2 = x0[2]
else:
raise ValueError('expected 1, 2 or 3 starting points, got %i'
% len(x0))
self.f = f
self.verbose = kwargs['verbose']
def __iter__(self):
f = self.f
x0 = self.x0
x1 = self.x1
x2 = self.x2
fx0 = f(x0)
fx1 = f(x1)
fx2 = f(x2)
while True:
# TODO: maybe refactoring with function for divided differences
# calculate divided differences
fx2x1 = (fx1 - fx2) / (x1 - x2)
fx2x0 = (fx0 - fx2) / (x0 - x2)
fx1x0 = (fx0 - fx1) / (x0 - x1)
w = fx2x1 + fx2x0 - fx1x0
fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
if w == 0 and fx2x1x0 == 0:
if self.verbose:
print('canceled with')
print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
break
x0 = x1
fx0 = fx1
x1 = x2
fx1 = fx2
# denominator should be as large as possible => choose sign
r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
if abs(w - r) > abs(w + r):
r = -r
x2 -= 2*fx2 / (w + r)
fx2 = f(x2)
error = abs(x2 - x1)
yield x2, error
# TODO: consider raising a ValueError when there's no sign change in a and b
class Bisection:
"""
1d-solver generating pairs of approximative root and error.
Uses bisection method to find a root of f in [a, b].
Might fail for multiple roots (needs sign change).
Pro:
* robust and reliable
Contra:
* converges slowly
* needs sign change
"""
maxsteps = 100
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if len(x0) != 2:
raise ValueError('expected interval of 2 points, got %i' % len(x0))
self.f = f
self.a = x0[0]
self.b = x0[1]
def __iter__(self):
f = self.f
a = self.a
b = self.b
l = b - a
fb = f(b)
while True:
m = self.ctx.ldexp(a + b, -1)
fm = f(m)
if fm * fb < 0:
a = m
else:
b = m
fb = fm
l /= 2
yield (a + b)/2, abs(l)
def _getm(method):
"""
Return a function to calculate m for Illinois-like methods.
"""
if method == 'illinois':
def getm(fz, fb):
return 0.5
elif method == 'pegasus':
def getm(fz, fb):
return fb/(fb + fz)
elif method == 'anderson':
def getm(fz, fb):
m = 1 - fz/fb
if m > 0:
return m
else:
return 0.5
else:
raise ValueError("method '%s' not recognized" % method)
return getm
class Illinois:
"""
1d-solver generating pairs of approximative root and error.
Uses Illinois method or similar to find a root of f in [a, b].
Might fail for multiple roots (needs sign change).
Combines bisect with secant (improved regula falsi).
The only difference between the methods is the scaling factor m, which is
used to ensure convergence (you can choose one using the 'method' keyword):
Illinois method ('illinois'):
m = 0.5
Pegasus method ('pegasus'):
m = fb/(fb + fz)
Anderson-Bjoerk method ('anderson'):
m = 1 - fz/fb if positive else 0.5
Pro:
* converges very fast
Contra:
* has problems with multiple roots
* needs sign change
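Illustrative use through ``findroot`` with a bracketing interval (printed
to 10 digits here, since the solver stops once ``abs(f(z))`` drops below
the tolerance)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(findroot(sin, (3, 3.5), solver='illinois'), 10)
3.141592654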
"""
maxsteps = 30
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if len(x0) != 2:
raise ValueError('expected interval of 2 points, got %i' % len(x0))
self.a = x0[0]
self.b = x0[1]
self.f = f
self.tol = kwargs['tol']
self.verbose = kwargs['verbose']
self.method = kwargs.get('method', 'illinois')
self.getm = _getm(self.method)
if self.verbose:
print('using %s method' % self.method)
def __iter__(self):
method = self.method
f = self.f
a = self.a
b = self.b
fa = f(a)
fb = f(b)
m = None
while True:
l = b - a
if l == 0:
break
s = (fb - fa) / l
z = a - fa/s
fz = f(z)
if abs(fz) < self.tol:
# TODO: better condition (when f is very flat)
if self.verbose:
print('canceled with z =', z)
yield z, l
break
if fz * fb < 0: # root in [z, b]
a = b
fa = fb
b = z
fb = fz
else: # root in [a, z]
m = self.getm(fz, fb)
b = z
fb = fz
fa = m*fa # scale down to ensure convergence
if self.verbose and m and not method == 'illinois':
print('m:', m)
yield (a + b)/2, abs(l)
def Pegasus(*args, **kwargs):
"""
1d-solver generating pairs of approximative root and error.
Uses Pegasus method to find a root of f in [a, b].
Wrapper for illinois to use method='pegasus'.
"""
kwargs['method'] = 'pegasus'
return Illinois(*args, **kwargs)
def Anderson(*args, **kwargs):
"""
1d-solver generating pairs of approximative root and error.
Uses Anderson-Bjoerk method to find a root of f in [a, b].
Wrapper for illinois to use method='anderson'.
"""
kwargs['method'] = 'anderson'
return Illinois(*args, **kwargs)
# TODO: check whether it's possible to combine it with Illinois stuff
class Ridder:
"""
1d-solver generating pairs of approximative root and error.
Ridders' method to find a root of f in [a, b].
It is said to perform as well as Brent's method while being simpler.
Pro:
* very fast
* simpler than Brent's method
Contra:
* two function evaluations per step
* has problems with multiple roots
* needs sign change
http://en.wikipedia.org/wiki/Ridders'_method
"""
maxsteps = 30
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
self.f = f
if len(x0) != 2:
raise ValueError('expected interval of 2 points, got %i' % len(x0))
self.x1 = x0[0]
self.x2 = x0[1]
self.verbose = kwargs['verbose']
self.tol = kwargs['tol']
def __iter__(self):
ctx = self.ctx
f = self.f
x1 = self.x1
fx1 = f(x1)
x2 = self.x2
fx2 = f(x2)
while True:
x3 = 0.5*(x1 + x2)
fx3 = f(x3)
x4 = x3 + (x3 - x1) * ctx.sign(fx1 - fx2) * fx3 / ctx.sqrt(fx3**2 - fx1*fx2)
fx4 = f(x4)
if abs(fx4) < self.tol:
# TODO: better condition (when f is very flat)
if self.verbose:
print('canceled with f(x4) =', fx4)
yield x4, abs(x1 - x2)
break
if fx4 * fx2 < 0: # root in [x4, x2]
x1 = x4
fx1 = fx4
else: # root in [x1, x4]
x2 = x4
fx2 = fx4
error = abs(x1 - x2)
yield (x1 + x2)/2, error
class ANewton:
"""
EXPERIMENTAL 1d-solver generating pairs of approximative root and error.
Uses Newton's method modified to use Steffensen's method when convergence
is slow (i.e. for multiple roots).
"""
maxsteps = 20
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
if not len(x0) == 1:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.x0 = x0[0]
self.f = f
if not 'df' in kwargs:
def df(x):
return self.ctx.diff(f, x)
else:
df = kwargs['df']
self.df = df
def phi(x):
return x - f(x) / df(x)
self.phi = phi
self.verbose = kwargs['verbose']
def __iter__(self):
x0 = self.x0
f = self.f
df = self.df
phi = self.phi
error = 0
counter = 0
while True:
prevx = x0
try:
x0 = phi(x0)
except ZeroDivisionError:
if self.verbose:
print('ZeroDivisionError: canceled with x =', x0)
break
preverror = error
error = abs(prevx - x0)
# TODO: decide not to use convergence acceleration
if error and abs(error - preverror) / error < 1:
if self.verbose:
print('converging slowly')
counter += 1
if counter >= 3:
# accelerate convergence
phi = steffensen(phi)
counter = 0
if self.verbose:
print('accelerating convergence')
yield x0, error
# TODO: add Brent
############################
# MULTIDIMENSIONAL SOLVERS #
############################
def jacobian(ctx, f, x):
"""
Calculate the Jacobian matrix of a function at the point x0.
This is the first derivative of a vectorial function:
f : R^n -> R^m with m >= n
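A small sketch (forward differences; the exact Jacobian of this ``f`` at
(1, 2) is [[2, 1], [3, 0]])::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x1, x2: (x1**2 + x2, 3*x1)
>>> nprint(mp.jacobian(f, (1, 2)), 5)
[2.0 1.0]
[3.0 0.0]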
"""
x = ctx.matrix(x)
h = ctx.sqrt(ctx.eps)
fx = ctx.matrix(f(*x))
m = len(fx)
n = len(x)
J = ctx.matrix(m, n)
for j in xrange(n):
xj = x.copy()
xj[j] += h
Jj = (ctx.matrix(f(*xj)) - fx) / h
for i in xrange(m):
J[i,j] = Jj[i]
return J
# TODO: test with user-specified jacobian matrix, support force_type
class MDNewton:
"""
Find the root of a vector function numerically using Newton's method.
f is a vector function representing a nonlinear equation system.
x0 is the starting point close to the root.
J is a function returning the Jacobian matrix for a point.
Supports overdetermined systems.
Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
The function to calculate the Jacobian matrix can be given using the
keyword 'J'. Otherwise it will be calculated numerically.
Please note that this method converges only locally. Especially for high-
dimensional systems it is not trivial to find a good starting point
close enough to the root.
It is recommended to use a faster, low-precision solver from SciPy [1] or
OpenOpt [2] to get an initial guess. Afterwards you can use this method for
root-polishing to any precision.
[1] http://scipy.org
[2] http://openopt.org
"""
maxsteps = 10
def __init__(self, ctx, f, x0, **kwargs):
self.ctx = ctx
self.f = f
if isinstance(x0, (tuple, list)):
x0 = ctx.matrix(x0)
assert x0.cols == 1, 'need a vector'
self.x0 = x0
if 'J' in kwargs:
self.J = kwargs['J']
else:
def J(*x):
return ctx.jacobian(f, x)
self.J = J
self.norm = kwargs['norm']
self.verbose = kwargs['verbose']
def __iter__(self):
f = self.f
x0 = self.x0
norm = self.norm
J = self.J
fx = self.ctx.matrix(f(*x0))
fxnorm = norm(fx)
cancel = False
while not cancel:
# get direction of descent
fxn = -fx
Jx = J(*x0)
s = self.ctx.lu_solve(Jx, fxn)
if self.verbose:
print('Jx:')
print(Jx)
print('s:', s)
# damping step size TODO: better strategy (hard task)
l = self.ctx.one
x1 = x0 + s
while True:
if x1 == x0:
if self.verbose:
print("canceled, won't get more excact")
cancel = True
break
fx = self.ctx.matrix(f(*x1))
newnorm = norm(fx)
if newnorm < fxnorm:
# new x accepted
fxnorm = newnorm
x0 = x1
break
l /= 2
x1 = x0 + l*s
yield (x0, fxnorm)
#############
# UTILITIES #
#############
str2solver = {'newton':Newton, 'secant':Secant,'mnewton':MNewton,
'halley':Halley, 'muller':Muller, 'bisect':Bisection,
'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
def findroot(ctx, f, x0, solver=Secant, tol=None, verbose=False, verify=True, **kwargs):
r"""
Find a solution to `f(x) = 0`, using *x0* as starting point or
interval for *x*.
Multidimensional overdetermined systems are supported.
You can specify them using a function or a list of functions.
If the found root does not satisfy `|f(x)|^2 < \mathrm{tol}`,
an exception is raised (this can be disabled with *verify=False*).
**Arguments**
*f*
one dimensional function
*x0*
starting point, several starting points or interval (depends on solver)
*tol*
the returned solution has an error smaller than this
*verbose*
print additional information for each iteration if true
*verify*
verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
*solver*
a generator for *f* and *x0* returning approximative solution and error
*maxsteps*
after how many steps the solver will cancel
*df*
first derivative of *f* (used by some solvers)
*d2f*
second derivative of *f* (used by some solvers)
*multidimensional*
force multidimensional solving
*J*
Jacobian matrix of *f* (used by multidimensional solvers)
*norm*
used vector norm (used by multidimensional solvers)
solver has to be callable with ``(f, x0, **kwargs)`` and return a generator
yielding pairs of approximative solution and estimated error (which is
expected to be positive).
You can use the following string aliases:
'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
'ridder', 'anewton', 'bisect'
See mpmath.optimization for their documentation.
**Examples**
The function :func:`~mpmath.findroot` locates a root of a given function using the
secant method by default. A simple example use of the secant method is to
compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> findroot(sin, 3)
3.14159265358979323846264338328
The secant method can be used to find complex roots of analytic functions,
although it must in that case generally be given a nonreal starting value
(or else it will never leave the real line)::
>>> mp.dps = 15
>>> findroot(lambda x: x**3 + 2*x + 1, j)
(0.226698825758202 + 1.46771150871022j)
A nice application is to compute nontrivial roots of the Riemann zeta
function with many digits (good initial values are needed for convergence)::
>>> mp.dps = 30
>>> findroot(zeta, 0.5+14j)
(0.5 + 14.1347251417346937904572519836j)
The secant method can also be used as an optimization algorithm, by passing
it a derivative of a function. The following example locates the positive
minimum of the gamma function::
>>> mp.dps = 20
>>> findroot(lambda x: diff(gamma, x), 1)
1.4616321449683623413
Finally, a useful application is to compute inverse functions, such as the
Lambert W function which is the inverse of `w e^w`, given the first
term of the solution's asymptotic expansion as the initial value. In basic
cases, this gives identical results to mpmath's built-in ``lambertw``
function::
>>> def lambert(x):
... return findroot(lambda w: w*exp(w) - x, log(1+x))
...
>>> mp.dps = 15
>>> lambert(1); lambertw(1)
0.567143290409784
0.567143290409784
>>> lambert(1000); lambertw(1000)
5.2496028524016
5.2496028524016
Multidimensional functions are also supported::
>>> f = [lambda x1, x2: x1**2 + x2,
... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
>>> findroot(f, (0, 0))
[-0.618033988749895]
[-0.381966011250105]
>>> findroot(f, (10, 10))
[ 1.61803398874989]
[-2.61803398874989]
You can verify this by solving the system manually.
Please note that the following (more general) syntax also works::
>>> def f(x1, x2):
... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
...
>>> findroot(f, (0, 0))
[-0.618033988749895]
[-0.381966011250105]
**Multiple roots**
For multiple roots all methods of the Newtonian family (including secant)
converge slowly. Consider this example::
>>> f = lambda x: (x - 1)**99
>>> findroot(f, 0.9, verify=False)
0.918073542444929
Even for a very close starting point the secant method converges very
slowly. Use ``verbose=True`` to illustrate this.
It is possible to modify Newton's method to make it converge regardless of
the root's multiplicity::
>>> findroot(f, -10, solver='mnewton')
1.0
This variant uses the first and second derivative of the function, which is
not very efficient.
Alternatively you can use an experimental Newtonian solver that keeps track
of the speed of convergence and accelerates it using Steffensen's method if
necessary::
>>> findroot(f, -10, solver='anewton', verbose=True)
x: -9.88888888888888888889
error: 0.111111111111111111111
converging slowly
x: -9.77890011223344556678
error: 0.10998877665544332211
converging slowly
x: -9.67002233332199662166
error: 0.108877778911448945119
converging slowly
accelerating convergence
x: -9.5622443299551077669
error: 0.107778003366888854764
converging slowly
x: 0.99999999999999999214
error: 10.562244329955107759
x: 1.0
error: 7.8598304758094664213e-18
1.0
**Complex roots**
For complex roots it's recommended to use Muller's method as it converges
even for real starting points very fast::
>>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
(0.727136084491197 + 0.934099289460529j)
**Intersection methods**
When you need to find a root in a known interval, it's highly recommended to
use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
Usually they converge faster and more reliably. They do however have problems
with multiple roots and usually need a sign change to find a root::
>>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
0.0
Be careful with symmetric functions::
>>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
Traceback (most recent call last):
...
ZeroDivisionError
It fails even for better starting points, because there is no sign change::
>>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1 > 2.1684e-19)
Try another starting point or tweak arguments.
"""
prec = ctx.prec
try:
ctx.prec += 20
# initialize arguments
if tol is None:
tol = ctx.eps * 2**10
kwargs['verbose'] = kwargs.get('verbose', verbose)
if 'd1f' in kwargs:
kwargs['df'] = kwargs['d1f']
kwargs['tol'] = tol
if isinstance(x0, (list, tuple)):
x0 = [ctx.convert(x) for x in x0]
else:
x0 = [ctx.convert(x0)]
if isinstance(solver, str):
try:
solver = str2solver[solver]
except KeyError:
raise ValueError('could not recognize solver')
# accept list of functions
if isinstance(f, (list, tuple)):
f2 = copy(f)
def tmp(*args):
return [fn(*args) for fn in f2]
f = tmp
# detect multidimensional functions
try:
fx = f(*x0)
multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
except TypeError:
fx = f(x0[0])
multidimensional = False
if 'multidimensional' in kwargs:
multidimensional = kwargs['multidimensional']
if multidimensional:
# only one multidimensional solver available at the moment
solver = MDNewton
if not 'norm' in kwargs:
norm = lambda x: ctx.norm(x, 'inf')
kwargs['norm'] = norm
else:
norm = kwargs['norm']
else:
norm = abs
# happily return starting point if it's a root
if norm(fx) == 0:
if multidimensional:
return ctx.matrix(x0)
else:
return x0[0]
# use solver
iterations = solver(ctx, f, x0, **kwargs)
if 'maxsteps' in kwargs:
maxsteps = kwargs['maxsteps']
else:
maxsteps = iterations.maxsteps
i = 0
for x, error in iterations:
if verbose:
print('x: ', x)
print('error:', error)
i += 1
if error < tol * max(1, norm(x)) or i >= maxsteps:
break
if not isinstance(x, (list, tuple, ctx.matrix)):
xl = [x]
else:
xl = x
if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
raise ValueError('Could not find root within given tolerance. '
'(%g > %g)\n'
'Try another starting point or tweak arguments.'
% (norm(f(*xl))**2, tol))
return x
finally:
ctx.prec = prec
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
"""
Return the multiplicity of a given root of f.
Internally, numerical derivatives are used. This might be inefficient for
higher order derivatives. Due to this, ``multiplicity`` stops after
evaluating 10 derivatives by default. You can specify the n-th derivative
using the dnf keyword.
>>> from mpmath import *
>>> multiplicity(lambda x: sin(x) - 1, pi/2)
2
"""
if tol is None:
tol = ctx.eps ** 0.8
kwargs['d0f'] = f
for i in xrange(maxsteps):
dfstr = 'd' + str(i) + 'f'
if dfstr in kwargs:
df = kwargs[dfstr]
else:
df = lambda x: ctx.diff(f, x, i)
if not abs(df(root)) < tol:
break
return i
def steffensen(f):
"""
linearly convergent function -> quadratically convergent function
Steffensen's method for quadratic convergence of a linearly converging
sequence.
Do not use it for higher rates of convergence.
It may even work for divergent sequences.
Definition:
F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)
Example
.......
You can use Steffensen's method to accelerate a fixpoint iteration of linear
(or less) convergence.
x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
phi(x) = x**2 there are two fixpoints: 0 and 1.
Let's try Steffensen's method:
>>> f = lambda x: x**2
>>> from mpmath.calculus.optimization import steffensen
>>> F = steffensen(f)
>>> for x in [0.5, 0.9, 2.0]:
... fx = Fx = x
... for i in range(10):
... try:
... fx = f(fx)
... except OverflowError:
... pass
... try:
... Fx = F(Fx)
... except ZeroDivisionError:
... pass
... print('%20g %20g' % (fx, Fx))
0.25 -0.5
0.0625 0.1
0.00390625 -0.0011236
1.52588e-005 1.41691e-009
2.32831e-010 -2.84465e-027
5.42101e-020 2.30189e-080
2.93874e-039 -1.2197e-239
8.63617e-078 0
7.45834e-155 0
5.56268e-309 0
0.81 1.02676
0.6561 1.00134
0.430467 1
0.185302 1
0.0343368 1
0.00117902 1
1.39008e-006 1
1.93233e-012 1
3.73392e-024 1
1.39421e-047 1
4 1.6
16 1.2962
256 1.10194
65536 1.01659
4.29497e+009 1.00053
1.84467e+019 1
3.40282e+038 1
1.15792e+077 1
1.34078e+154 1
1.34078e+154 1
Unmodified, the iteration converges only towards 0. Modified, it not only
converges much faster, it even converges to the repelling fixpoint 1.
"""
def F(x):
fx = f(x)
ffx = f(fx)
return (x*ffx - fx**2) / (ffx - 2*fx + x)
return F
OptimizationMethods.jacobian = jacobian
OptimizationMethods.findroot = findroot
OptimizationMethods.multiplicity = multiplicity
if __name__ == '__main__':
import doctest
doctest.testmod()
# ===== end of AltAnalyze-2.1.3.15 file: altanalyze/stats_scripts/mpmath/calculus/optimization.py =====
from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
r"""
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
:func:`~mpmath.polyval` evaluates the polynomial
.. math ::
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
tuple `(P(x), P'(x))`.
>>> from mpmath import *
>>> mp.pretty = True
>>> polyval([3, 0, 2], 0.5)
2.75
>>> polyval([3, 0, 2], 0.5, derivative=True)
(2.75, 3.0)
The coefficients and the evaluation point may be any combination
of real or complex numbers.
"""
if not coeffs:
return ctx.zero
p = ctx.convert(coeffs[0])
q = ctx.zero
for c in coeffs[1:]:
if derivative:
q = p + x*q
p = c + x*p
if derivative:
return p, q
else:
return p
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10, error=False):
"""
Computes all roots (real or complex) of a given polynomial. The roots are
returned as a sorted list, where real roots appear first followed by
complex conjugate roots as adjacent elements. The polynomial should be
given as a list of coefficients, in the format used by :func:`~mpmath.polyval`.
The leading coefficient must be nonzero.
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)* where
*err* is an estimate of the maximum error among the computed roots.
**Examples**
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(polyroots([1,-1,-14,24]), 4)
[-4.0, 2.0, 3.0]
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
error estimate::
>>> roots, err = polyroots([4,3,2], error=True)
>>> for r in roots:
... print(r)
...
(-0.375 + 0.59947894041409j)
(-0.375 - 0.59947894041409j)
>>>
>>> err
2.22044604925031e-16
>>>
>>> polyval([4,3,2], roots[0])
(2.22044604925031e-16 + 0.0j)
>>> polyval([4,3,2], roots[1])
(2.22044604925031e-16 + 0.0j)
The following example computes all the 5th roots of unity; that is,
the roots of `x^5 - 1`::
>>> mp.dps = 20
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
... print(r)
...
1.0
(-0.8090169943749474241 + 0.58778525229247312917j)
(-0.8090169943749474241 - 0.58778525229247312917j)
(0.3090169943749474241 + 0.95105651629515357212j)
(0.3090169943749474241 - 0.95105651629515357212j)
**Precision and conditioning**
Provided there are no repeated roots, :func:`~mpmath.polyroots` can typically
compute all roots of an arbitrary polynomial to high precision::
>>> mp.dps = 60
>>> for r in polyroots([1, 0, -10, 0, 1]):
... print(r)
...
-3.14626436994197234232913506571557044551247712918732870123249
-0.317837245195782244725757617296174288373133378433432554879127
0.317837245195782244725757617296174288373133378433432554879127
3.14626436994197234232913506571557044551247712918732870123249
>>>
>>> sqrt(3) + sqrt(2)
3.14626436994197234232913506571557044551247712918732870123249
>>> sqrt(3) - sqrt(2)
0.317837245195782244725757617296174288373133378433432554879127
**Algorithm**
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
uses complex arithmetic to locate all roots simultaneously.
The Durand-Kerner method can be viewed as approximately performing
simultaneous Newton iteration for all the roots. In particular,
the convergence to simple roots is quadratic, just like Newton's
method.
Although all roots are internally calculated using complex arithmetic,
any root found to have an imaginary part smaller than the estimated
numerical error is truncated to a real number. Real roots are placed
first in the returned list, sorted by value. The remaining complex
roots are sorted by their real parts so that conjugate roots end up
next to each other.
**References**
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
"""
if len(coeffs) <= 1:
if not coeffs or not coeffs[0]:
raise ValueError("Input to polyroots must not be the zero polynomial")
# Constant polynomial with no roots
return []
orig = ctx.prec
weps = +ctx.eps
try:
ctx.prec += 10
tol = ctx.eps * 128
deg = len(coeffs) - 1
# Must be monic
lead = ctx.convert(coeffs[0])
if lead == 1:
coeffs = [ctx.convert(c) for c in coeffs]
else:
coeffs = [c/lead for c in coeffs]
f = lambda x: ctx.polyval(coeffs, x)
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
err = [ctx.one for n in xrange(deg)]
# Durand-Kerner iteration until convergence
for step in xrange(maxsteps):
if abs(max(err)) < tol:
break
for i in xrange(deg):
if not abs(err[i]) < tol:
p = roots[i]
x = f(p)
for j in range(deg):
if i != j:
try:
x /= (p-roots[j])
except ZeroDivisionError:
continue
roots[i] = p - x
err[i] = abs(x)
# Remove small imaginary parts
if cleanup:
for i in xrange(deg):
if abs(ctx._im(roots[i])) < weps:
roots[i] = roots[i].real
elif abs(ctx._re(roots[i])) < weps:
roots[i] = roots[i].imag * 1j
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
finally:
ctx.prec = orig
if error:
err = max(err)
err = max(err, ctx.ldexp(1, -orig+1))
return [+r for r in roots], +err
else:
return [+r for r in roots]
# ===== end of AltAnalyze-2.1.3.15 file: altanalyze/stats_scripts/mpmath/calculus/polynomials.py =====
from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Approximation methods #
#----------------------------------------------------------------------------#
# The Chebyshev approximation formula is given at:
# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
# The only major changes in the following code is that we return the
# expanded polynomial coefficients instead of Chebyshev coefficients,
# and that we automatically transform [a,b] -> [-1,1] and back
# for convenience.
# Coefficient in Chebyshev approximation
def chebcoeff(ctx,f,a,b,j,N):
s = ctx.mpf(0)
h = ctx.mpf(0.5)
for k in range(1, N+1):
t = ctx.cospi((k-h)/N)
s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N)
return 2*s/N
# Generate Chebyshev polynomials T_n(ax+b) in expanded form
def chebT(ctx, a=1, b=0):
Tb = [1]
yield Tb
Ta = [b, a]
while 1:
yield Ta
# Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
Tmp = [0] + [2*a*t for t in Ta]
for i, c in enumerate(Ta): Tmp[i] += 2*b*c
for i, c in enumerate(Tb): Tmp[i] -= c
Ta, Tb = Tmp, Ta
@defun
def chebyfit(ctx, f, interval, N, error=False):
r"""
Computes a polynomial of degree `N-1` that approximates the
given function `f` on the interval `[a, b]`. With ``error=True``,
:func:`~mpmath.chebyfit` also returns an accurate estimate of the
maximum absolute error; that is, the maximum value of
`|f(x) - P(x)|` for `x \in [a, b]`.
:func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
which gives a nearly optimal solution: that is, the maximum
error of the approximating polynomial is very close to
the smallest possible for any polynomial of the same degree.
Chebyshev approximation is very useful if one needs repeated
evaluation of an expensive function, such as function defined
implicitly by an integral or a differential equation. (For
example, it could be used to turn a slow mpmath function
into a fast machine-precision version of the same.)
**Examples**
Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
>>> nprint(poly)
[0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
>>> nprint(err, 12)
1.61351758081e-5
The polynomial can be evaluated using ``polyval``::
>>> nprint(polyval(poly, 1.6), 12)
-0.0291858904138
>>> nprint(cos(1.6), 12)
-0.0291995223013
Sampling the true error at 1000 points shows that the error
estimate generated by ``chebyfit`` is remarkably good::
>>> error = lambda x: abs(cos(x) - polyval(poly, x))
>>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
1.61349954245e-5
**Choice of degree**
The degree `N` can be set arbitrarily high, to obtain an
arbitrarily good approximation. As a rule of thumb, an
`N`-term Chebyshev approximation is good to `N/(b-a)` decimal
places on a unit interval (although this depends on how
well-behaved `f` is). The cost grows accordingly: ``chebyfit``
evaluates the function `(N^2)/2` times to compute the
coefficients and an additional `N` times to estimate the error.
**Possible issues**
One should be careful to use a sufficiently high working
precision both when calling ``chebyfit`` and when evaluating
the resulting polynomial, as the polynomial is sometimes
ill-conditioned. It is for example difficult to reach
15-digit accuracy when evaluating the polynomial using
machine precision floats, no matter the theoretical
accuracy of the polynomial. (The option to return the
coefficients in Chebyshev form should be made available
in the future.)
It is important to note that Chebyshev approximation works
poorly if `f` is not smooth. A function containing singularities,
rapid oscillation, etc can be approximated more effectively by
multiplying it by a weight function that cancels out the
nonsmooth features, or by dividing the interval into several
segments.
"""
a, b = ctx._as_points(interval)
orig = ctx.prec
try:
ctx.prec = orig + int(N**0.5) + 20
c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
d = [ctx.zero] * N
d[0] = -c[0]/2
h = ctx.mpf(0.5)
T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
for (k, Tk) in zip(range(N), T):
for i in range(len(Tk)):
d[i] += c[k]*Tk[i]
d = d[::-1]
# Estimate maximum error
err = ctx.zero
for k in range(N):
x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
err = max(err, abs(f(x) - ctx.polyval(d, x)))
finally:
ctx.prec = orig
if error:
return d, +err
else:
return d
@defun
def fourier(ctx, f, interval, N):
r"""
Computes the Fourier series of degree `N` of the given function
on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
two lists `(c, s)` of coefficients (the cosine series and sine
series, respectively), such that
.. math ::
f(x) \sim \sum_{k=0}^N
c_k \cos(k m) + s_k \sin(k m)
where `m = 2 \pi / (b-a)`.
Note that many texts define the first coefficient as `2 c_0` instead
of `c_0`. The easiest way to evaluate the computed series correctly
is to pass it to :func:`~mpmath.fourierval`.
**Examples**
The function `f(x) = x` has a simple Fourier series on the standard
interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
the function has odd symmetry), and the sine coefficients are
rational numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> c, s = fourier(lambda x: x, [-pi, pi], 5)
>>> nprint(c)
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint(s)
[0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
This computes a Fourier series of a nonsymmetric function on
a nonstandard interval::
>>> I = [-1, 1.5]
>>> f = lambda x: x**2 - 4*x + 1
>>> cs = fourier(f, I, 4)
>>> nprint(cs[0])
[0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
>>> nprint(cs[1])
[0.0, -2.6255, 0.580905, 0.219974, -0.540057]
It is instructive to plot a function along with its truncated
Fourier series::
>>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP
Fourier series generally converge slowly (and may not converge
pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
series gives an `L^2` error corresponding to 2-digit accuracy::
>>> I = [-1, 1]
>>> cs = fourier(cosh, I, 9)
>>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
>>> nprint(sqrt(quad(g, I)))
0.00467963
:func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
the accuracy (and speed) can be improved by including all singular
points in the interval specification::
>>> nprint(fourier(abs, [-1, 1], 0), 10)
([0.5000441648], [0.0])
>>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
([0.5], [0.0])
"""
interval = ctx._as_points(interval)
a = interval[0]
b = interval[-1]
L = b-a
cos_series = []
sin_series = []
cutoff = ctx.eps*10
for n in xrange(N+1):
m = 2*n*ctx.pi/L
an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
if n == 0:
an /= 2
if abs(an) < cutoff: an = ctx.zero
if abs(bn) < cutoff: bn = ctx.zero
cos_series.append(an)
sin_series.append(bn)
return cos_series, sin_series
@defun
def fourierval(ctx, series, interval, x):
"""
Evaluates a Fourier series (in the format computed by
by :func:`~mpmath.fourier` for the given interval) at the point `x`.
The series should be a pair `(c, s)` where `c` is the
cosine series and `s` is the sine series. The two lists
need not have the same length.
"""
cs, ss = series
ab = ctx._as_points(interval)
a = interval[0]
b = interval[-1]
m = 2*ctx.pi/(ab[-1]-ab[0])
s = ctx.zero
s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
return s
# ===== end of AltAnalyze-2.1.3.15 file: altanalyze/stats_scripts/mpmath/calculus/approximation.py =====
try:
from itertools import izip
except ImportError:
izip = zip
from ..libmp.backend import xrange
from .calculus import defun
try:
next = next
except NameError:
next = lambda _: _.next()
@defun
def richardson(ctx, seq):
r"""
Given a list ``seq`` of the first `N` elements of a slowly convergent
infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
Richardson extrapolate for the limit.
:func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
limit and `c` is the magnitude of the largest weight used during the
computation. The weight provides an estimate of the precision
lost to cancellation. Due to cancellation effects, the sequence must
be typically be computed at a much higher precision than the target
accuracy of the extrapolation.
**Applicability and issues**
The `N`-step Richardson extrapolation algorithm used by
:func:`~mpmath.richardson` is described in [1].
Richardson extrapolation only works for a specific type of sequence,
namely one converging like partial sums of
`P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
    When the sequence does not converge at such a rate,
:func:`~mpmath.richardson` generally produces garbage.
Richardson extrapolation has the advantage of being fast: the `N`-term
extrapolate requires only `O(N)` arithmetic operations, and usually
produces an estimate that is accurate to `O(N)` digits. Contrast with
the Shanks transformation (see :func:`~mpmath.shanks`), which requires
`O(N^2)` operations.
:func:`~mpmath.richardson` is unable to produce an estimate for the
    approximation error. One way to estimate the error is to perform
    two extrapolations with slightly different `N` and compare the
    results.
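    For instance (a usage sketch; ``S`` stands for a list of partial sums,
    such as the one constructed in the example below)::
        >>> v1, c1 = richardson(S[:10])     #doctest: +SKIP
        >>> v2, c2 = richardson(S[:12])     #doctest: +SKIP
        >>> error_estimate = abs(v1 - v2)   #doctest: +SKIP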
Richardson extrapolation does not work for oscillating sequences.
As a simple workaround, :func:`~mpmath.richardson` detects if the last
three elements do not differ monotonically, and in that case
applies extrapolation only to the even-index elements.
**Example**
Applying Richardson extrapolation to the Leibniz series for `\pi`::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
... for m in range(1,30)]
>>> v, c = richardson(S[:10])
>>> v
3.2126984126984126984126984127
>>> nprint([v-pi, c])
[0.0711058, 2.0]
>>> v, c = richardson(S[:30])
>>> v
3.14159265468624052829954206226
>>> nprint([v-pi, c])
[1.09645e-9, 20833.3]
**References**
1. [BenderOrszag]_ pp. 375-376
"""
assert len(seq) >= 3
if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
seq = seq[::2]
N = len(seq)//2-1
s = ctx.zero
# The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
# To avoid repeated factorials, we simplify the quotient
# of successive weights to obtain a recurrence relation
c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
maxc = 1
for k in xrange(N+1):
s += c * seq[N+k]
maxc = max(abs(c), maxc)
c *= (k-N)*ctx.mpf(k+N+1)**N
c /= ((1+k)*ctx.mpf(k+N)**N)
return s, maxc
@defun
def shanks(ctx, seq, table=None, randomized=False):
r"""
Given a list ``seq`` of the first `N` elements of a slowly
convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
transformation often provides strong convergence acceleration,
especially if the sequence is oscillating.
The iterated Shanks transformation is computed using the Wynn
epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
epsilon table generated by Wynn's algorithm, which can be read
off as follows:
* The table is a list of lists forming a lower triangular matrix,
where higher row and column indices correspond to more accurate
values.
* The columns with even index hold dummy entries (required for the
computation) and the columns with odd index hold the actual
extrapolates.
* The last element in the last row is typically the most
accurate estimate of the limit.
* The difference to the third last element in the last row
provides an estimate of the approximation error.
* The magnitude of the second last element provides an estimate
of the numerical accuracy lost to cancellation.
    For convenience, the extrapolation is stopped at an odd index
    so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.
Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
This can be used to efficiently extend a previous computation after
new elements have been appended to the sequence. The table will
then be updated in-place.
**The Shanks transformation**
The Shanks transformation is defined as follows (see [2]): given
the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
given by
.. math ::
S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
The Shanks transformation gives the exact limit `A_{\infty}` in a
single step if `A_k = A + a q^k`. Note in particular that it
extrapolates the exact sum of a geometric series in a single step.
Applying the Shanks transformation once often improves convergence
substantially for an arbitrary sequence, but the optimal effect is
obtained by applying it iteratively:
`S(S(A_k)), S(S(S(A_k))), \ldots`.
Wynn's epsilon algorithm provides an efficient way to generate
the table of iterated Shanks transformations. It reduces the
computation of each element to essentially a single division, at
the cost of requiring dummy elements in the table. See [1] for
details.
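    For instance, applying the defining formula once by hand to three
    consecutive partial sums of the geometric series `1 + 1/2 + 1/4 + \ldots`
    recovers the limit exactly (a minimal sketch; ``S1`` is a hypothetical
    helper, not part of mpmath)::
        >>> S1 = lambda a0, a1, a2: (a2*a0 - a1**2)/(a2 + a0 - 2*a1)
        >>> S1(mpf(1), mpf('1.5'), mpf('1.75'))   #doctest: +SKIP
        2.0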
**Precision issues**
    Due to cancellation effects, the sequence must typically be
computed at a much higher precision than the target accuracy
of the extrapolation.
If the Shanks transformation converges to the exact limit (such
as if the sequence is a geometric series), then a division by
zero occurs. By default, :func:`~mpmath.shanks` handles this case by
terminating the iteration and returning the table it has
generated so far. With *randomized=True*, it will instead
replace the zero by a pseudorandom number close to zero.
(TODO: find a better solution to this problem.)
**Examples**
We illustrate by applying Shanks transformation to the Leibniz
series for `\pi`::
>>> from mpmath import *
>>> mp.dps = 50
>>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
... for m in range(1,30)]
>>>
>>> T = shanks(S[:7])
>>> for row in T:
... nprint(row)
...
[-0.75]
[1.25, 3.16667]
[-1.75, 3.13333, -28.75]
[2.25, 3.14524, 82.25, 3.14234]
[-2.75, 3.13968, -177.75, 3.14139, -969.937]
[3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
The extrapolated accuracy is about 4 digits, and about 4 digits
may have been lost due to cancellation::
>>> L = T[-1]
>>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
[2.22532e-5, 4.78309e-5, 3515.06]
Now we extend the computation::
>>> T = shanks(S[:25], T)
>>> L = T[-1]
>>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
[3.75527e-19, 1.48478e-19, 2.96014e+17]
The value for pi is now accurate to 18 digits. About 18 digits may
also have been lost to cancellation.
Here is an example with a geometric series, where the convergence
is immediate (the sum is exactly 1)::
>>> mp.dps = 15
>>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
... nprint(row)
[4.0]
[8.0, 1.0]
**References**
1. [GravesMorris]_
2. [BenderOrszag]_ pp. 368-375
"""
assert len(seq) >= 2
if table:
START = len(table)
else:
START = 0
table = []
STOP = len(seq) - 1
if STOP & 1:
STOP -= 1
one = ctx.one
eps = +ctx.eps
if randomized:
from random import Random
rnd = Random()
rnd.seed(START)
for i in xrange(START, STOP):
row = []
for j in xrange(i+1):
if j == 0:
a, b = 0, seq[i+1]-seq[i]
else:
if j == 1:
a = seq[i]
else:
a = table[i-1][j-2]
b = row[j-1] - table[i-1][j-1]
if not b:
if randomized:
b = rnd.getrandbits(10)*eps
elif i & 1:
return table[:-1]
else:
return table
row.append(a + one/b)
table.append(row)
return table
@defun
def sumap(ctx, f, interval, integral=None, error=False):
r"""
Evaluates an infinite series of an analytic summand *f* using the
Abel-Plana formula
.. math ::
\sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.
Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
the Abel-Plana formula does not require derivatives. However,
it only works when `|f(it)-f(-it)|` does not
increase too rapidly with `t`.
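    With *error=True*, an estimate of the total quadrature error is
    returned alongside the value (a usage sketch)::
        >>> v, e = sumap(lambda k: 1/k**2.5, [1, inf], error=True)  #doctest: +SKIP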
**Examples**
The Abel-Plana formula is particularly useful when the summand
decreases like a power of `k`; for example when the sum is a pure
zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sumap(lambda k: 1/k**2.5, [1,inf])
1.34148725725091717975677
>>> zeta(2.5)
1.34148725725091717975677
>>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
(-3.385361068546473342286084 - 0.7432082105196321803869551j)
>>> zeta(2.5+2.5j, 1+1j)
(-3.385361068546473342286084 - 0.7432082105196321803869551j)
If the series is alternating, numerical quadrature along the real
line is likely to give poor results, so it is better to evaluate
    the first term (the integral) symbolically whenever possible::
>>> n=3; z=-0.75
>>> I = expint(n,-log(z))
>>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
-0.6917036036904594510141448
>>> polylog(n,z)
-0.6917036036904594510141448
"""
prec = ctx.prec
try:
ctx.prec += 10
a, b = interval
assert b == ctx.inf
g = lambda x: f(x+a)
if integral is None:
i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
else:
i1, err1 = integral, 0
j = ctx.j
p = ctx.pi * 2
if ctx._is_real_type(i1):
h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
else:
h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
err = err1+err2
v = i1+i2+0.5*g(ctx.mpf(0))
finally:
ctx.prec = prec
if error:
return +v, err
return +v
@defun
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
adiffs=None, bdiffs=None, verbose=False, error=False,
_fast_abort=False):
r"""
Uses the Euler-Maclaurin formula to compute an approximation accurate
to within ``tol`` (which defaults to the present epsilon) of the sum
.. math ::
S = \sum_{k=a}^b f(k)
where `(a,b)` are given by ``interval`` and `a` or `b` may be
infinite. The approximation is
.. math ::
S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
\sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
\left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).
The last sum in the Euler-Maclaurin formula is not generally
convergent (a notable exception is if `f` is a polynomial, in
which case Euler-Maclaurin actually gives an exact result).
The summation is stopped as soon as the quotient between two
consecutive terms falls below *reject*. That is, by default
(*reject* = 10), the summation is continued as long as each
term adds at least one decimal.
Although not convergent, convergence to a given tolerance can
often be "forced" if `b = \infty` by summing up to `a+N` and then
applying the Euler-Maclaurin formula to the sum over the range
`(a+N+1, \ldots, \infty)`. This procedure is implemented by
:func:`~mpmath.nsum`.
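    A sketch of this procedure done by hand, summing the first terms
    directly and applying :func:`~mpmath.sumem` to the tail (the split
    point 100 is arbitrary)::
        >>> s = fsum(1/mpf(k)**2 for k in range(32, 100))   #doctest: +SKIP
        >>> s + sumem(lambda n: 1/n**2, [100, inf])         #doctest: +SKIP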
By default numerical quadrature and differentiation is used.
If the symbolic values of the integral and endpoint derivatives
are known, it is more efficient to pass the value of the
integral explicitly as ``integral`` and the derivatives
explicitly as ``adiffs`` and ``bdiffs``. The derivatives
should be given as iterables that yield
`f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).
**Examples**
Summation of an infinite series, with automatic and symbolic
integral and derivative values (the second should be much faster)::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> sumem(lambda n: 1/n**2, [32, inf])
0.03174336652030209012658168043874142714132886413417
>>> I = mpf(1)/32
        >>> D = ((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
>>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
0.03174336652030209012658168043874142714132886413417
An exact evaluation of a finite polynomial sum::
>>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
10500155000624963999742499550000.0
>>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
10500155000624963999742499550000
"""
tol = tol or +ctx.eps
interval = ctx._as_points(interval)
a = ctx.convert(interval[0])
b = ctx.convert(interval[-1])
err = ctx.zero
prev = 0
M = 10000
if a == ctx.ninf: adiffs = (0 for n in xrange(M))
else: adiffs = adiffs or ctx.diffs(f, a)
if b == ctx.inf: bdiffs = (0 for n in xrange(M))
else: bdiffs = bdiffs or ctx.diffs(f, b)
orig = ctx.prec
#verbose = 1
try:
ctx.prec += 10
s = ctx.zero
for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
if k & 1:
term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
mag = abs(term)
if verbose:
print("term", k, "magnitude =", ctx.nstr(mag))
if k > 4 and mag < tol:
s += term
break
elif k > 4 and abs(prev) / mag < reject:
err += mag
if _fast_abort:
return [s, (s, err)][error]
if verbose:
print("Failed to converge")
break
else:
s += term
prev = term
# Endpoint correction
if a != ctx.ninf: s += f(a)/2
if b != ctx.inf: s += f(b)/2
# Tail integral
if verbose:
print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
if integral:
s += integral
else:
integral, ierr = ctx.quad(f, interval, error=True)
if verbose:
print("Integration error:", ierr)
s += integral
err += ierr
finally:
ctx.prec = orig
if error:
return s, err
else:
return s
@defun
def adaptive_extrapolation(ctx, update, emfun, kwargs):
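    # Driver for nsum/nprod/limit: repeatedly add a batch of terms to the
    # partial sums/products, then test direct convergence and each enabled
    # extrapolation method (Richardson, Shanks, Euler-Maclaurin), returning
    # as soon as one estimate meets the tolerance.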
option = kwargs.get
if ctx._fixed_precision:
tol = option('tol', ctx.eps*2**10)
else:
tol = option('tol', ctx.eps/2**10)
verbose = option('verbose', False)
maxterms = option('maxterms', ctx.dps*10)
method = option('method', 'r+s').split('+')
skip = option('skip', 0)
steps = iter(option('steps', xrange(10, 10**9, 10)))
strict = option('strict')
#steps = (10 for i in xrange(1000))
if 'd' in method or 'direct' in method:
TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
else:
TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
TRY_SHANKS = ('s' in method) or ('shanks' in method)
TRY_EULER_MACLAURIN = ('e' in method) or \
('euler-maclaurin' in method)
last_richardson_value = 0
shanks_table = []
index = 0
step = 10
partial = []
best = ctx.zero
orig = ctx.prec
try:
if 'workprec' in kwargs:
ctx.prec = kwargs['workprec']
elif TRY_RICHARDSON or TRY_SHANKS:
ctx.prec = (ctx.prec+10) * 4
else:
ctx.prec += 30
while 1:
if index >= maxterms:
break
# Get new batch of terms
try:
step = next(steps)
except StopIteration:
pass
if verbose:
print("-"*70)
print("Adding terms #%i-#%i" % (index, index+step))
update(partial, xrange(index, index+step))
index += step
# Check direct error
best = partial[-1]
error = abs(best - partial[-2])
if verbose:
print("Direct error: %s" % ctx.nstr(error))
if error <= tol:
return best
# Check each extrapolation method
if TRY_RICHARDSON:
value, maxc = ctx.richardson(partial)
# Convergence
richardson_error = abs(value - last_richardson_value)
if verbose:
print("Richardson error: %s" % ctx.nstr(richardson_error))
# Convergence
if richardson_error <= tol:
return value
last_richardson_value = value
# Unreliable due to cancellation
if ctx.eps*maxc > tol:
if verbose:
print("Ran out of precision for Richardson")
TRY_RICHARDSON = False
if richardson_error < error:
error = richardson_error
best = value
if TRY_SHANKS:
shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
row = shanks_table[-1]
if len(row) == 2:
est1 = row[-1]
shanks_error = 0
else:
est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
shanks_error = abs(est1-est2)
if verbose:
print("Shanks error: %s" % ctx.nstr(shanks_error))
if shanks_error <= tol:
return est1
if ctx.eps*maxc > tol:
if verbose:
print("Ran out of precision for Shanks")
TRY_SHANKS = False
if shanks_error < error:
error = shanks_error
best = est1
if TRY_EULER_MACLAURIN:
if ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])).ae(-1):
if verbose:
print ("NOT using Euler-Maclaurin: the series appears"
" to be alternating, so numerical\n quadrature"
" will most likely fail")
TRY_EULER_MACLAURIN = False
else:
value, em_error = emfun(index, tol)
value += partial[-1]
if verbose:
print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
if em_error <= tol:
return value
if em_error < error:
best = value
finally:
ctx.prec = orig
if strict:
raise ctx.NoConvergence
if verbose:
print("Warning: failed to converge to target accuracy")
return best
@defun
def nsum(ctx, f, *intervals, **options):
r"""
Computes the sum
.. math :: S = \sum_{k=a}^b f(k)
where `(a, b)` = *interval*, and where `a = -\infty` and/or
`b = \infty` are allowed, or more generally
.. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
\sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
if multiple intervals are given.
Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
where the first converges rapidly and the second converges slowly,
are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nsum(lambda n: 1/fac(n), [0, inf])
2.71828182845905
>>> nsum(lambda n: 1/n**2, [1, inf])
1.64493406684823
When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
accurately estimate the sums of slowly convergent series. If the series is
finite, :func:`~mpmath.nsum` currently does not attempt to perform any
extrapolation, and simply calls :func:`~mpmath.fsum`.
Multidimensional infinite series are reduced to a single-dimensional
series over expanding hypercubes; if both infinite and finite dimensions
are present, the finite ranges are moved innermost. For more advanced
control over the summation order, use nested calls to :func:`~mpmath.nsum`,
or manually rewrite the sum as a single-dimensional series.
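    For instance, a double series can be rewritten with nested calls to
    control the summation order explicitly (a usage sketch)::
        >>> nsum(lambda m: nsum(lambda n: 1/(m*n)**2, [1, inf]), [1, inf])  #doctest: +SKIP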
**Options**
*tol*
Desired maximum final error. Defaults roughly to the
epsilon of the working precision.
*method*
Which summation algorithm to use (described below).
Default: ``'richardson+shanks'``.
*maxterms*
        Stop after at most this many terms. Default: 10*dps.
*steps*
An iterable giving the number of terms to add between
each extrapolation attempt. The default sequence is
[10, 20, 30, 40, ...]. For example, if you know that
approximately 100 terms will be required, efficiency might be
improved by setting this to [100, 10]. Then the first
extrapolation will be performed after 100 terms, the second
after 110, etc.
*verbose*
Print details about progress.
*ignore*
If enabled, any term that raises ``ArithmeticError``
or ``ValueError`` (e.g. through division by zero) is replaced
by a zero. This is convenient for lattice sums with
a singular term near the origin.
**Methods**
Unfortunately, an algorithm that can efficiently sum any infinite
series does not exist. :func:`~mpmath.nsum` implements several different
algorithms that each work well in different cases. The *method*
keyword argument selects a method.
    The default method is ``'r+s'``, i.e. both Richardson extrapolation
    and the Shanks transformation are attempted. A slower method that
    handles more cases is ``'r+s+e'``. For very high precision
    summation, or if the summation needs to be fast (for example if
    multiple sums need to be evaluated), it is a good idea to
    investigate which method works best and use only that.
``'richardson'`` / ``'r'``:
Uses Richardson extrapolation. Provides useful extrapolation
when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
additional information.
``'shanks'`` / ``'s'``:
Uses Shanks transformation. Typically provides useful
extrapolation when `f(k) \sim c^k` or when successive terms
alternate signs. Is able to sum some divergent series.
See :func:`~mpmath.shanks` for additional information.
``'euler-maclaurin'`` / ``'e'``:
Uses the Euler-Maclaurin summation formula to approximate
the remainder sum by an integral. This requires high-order
numerical derivatives and numerical integration. The advantage
of this algorithm is that it works regardless of the
decay rate of `f`, as long as `f` is sufficiently smooth.
See :func:`~mpmath.sumem` for additional information.
``'direct'`` / ``'d'``:
        Does not perform any extrapolation. This can be used
        (and should only be used) for rapidly convergent series.
The summation automatically stops when the terms
decrease below the target tolerance.
**Basic examples**
A finite sum::
>>> nsum(lambda k: 1/k, [1, 6])
2.45
Summation of a series going to negative infinity and a doubly
infinite series::
>>> nsum(lambda k: 1/k**2, [-inf, -1])
1.64493406684823
>>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
3.15334809493716
:func:`~mpmath.nsum` handles sums of complex numbers::
>>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
(1.6 + 0.8j)
The following sum converges very rapidly, so it is most
efficient to sum it by disabling convergence acceleration::
>>> mp.dps = 1000
>>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
... method='direct')
>>> b = (cos(1)+sin(1))/4
>>> abs(a-b) < mpf('1e-998')
True
**Examples with Richardson extrapolation**
Richardson extrapolation works well for sums over rational
functions, as well as their alternating counterparts::
>>> mp.dps = 50
>>> nsum(lambda k: 1 / k**3, [1, inf],
... method='richardson')
1.2020569031595942853997381615114499907649862923405
>>> zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
... method='richardson')
2.9348022005446793094172454999380755676568497036204
>>> pi**2/2-2
2.9348022005446793094172454999380755676568497036204
>>> nsum(lambda k: (-1)**k / k**3, [1, inf],
... method='richardson')
-0.90154267736969571404980362113358749307373971925537
>>> -3*zeta(3)/4
-0.90154267736969571404980362113358749307373971925538
**Examples with Shanks transformation**
The Shanks transformation works well for geometric series
and typically provides excellent acceleration for Taylor
series near the border of their disk of convergence.
Here we apply it to a series for `\log(2)`, which can be
seen as the Taylor series for `\log(1+x)` with `x = 1`::
>>> nsum(lambda k: -(-1)**k/k, [1, inf],
... method='shanks')
0.69314718055994530941723212145817656807550013436025
>>> log(2)
0.69314718055994530941723212145817656807550013436025
Here we apply it to a slowly convergent geometric series::
>>> nsum(lambda k: mpf('0.995')**k, [0, inf],
... method='shanks')
200.0
Finally, Shanks' method works very well for alternating series
where `f(k) = (-1)^k g(k)`, and often does so regardless of
the exact decay rate of `g(k)`::
>>> mp.dps = 15
>>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
... method='shanks')
0.765147024625408
>>> (2-sqrt(2))*zeta(1.5)/2
0.765147024625408
The following slowly convergent alternating series has no known
closed-form value. Evaluating the sum a second time at higher
precision indicates that the value is probably correct::
>>> nsum(lambda k: (-1)**k / log(k), [2, inf],
... method='shanks')
0.924299897222939
>>> mp.dps = 30
>>> nsum(lambda k: (-1)**k / log(k), [2, inf],
... method='shanks')
0.92429989722293885595957018136
**Examples with Euler-Maclaurin summation**
The sum in the following example has the wrong rate of convergence
    for either Richardson or Shanks to be effective::
>>> f = lambda k: log(k)/k**2.5
>>> mp.dps = 15
>>> nsum(f, [1, inf], method='euler-maclaurin')
0.38734195032621
>>> -diff(zeta, 2.5)
0.38734195032621
Increasing ``steps`` improves speed at higher precision::
>>> mp.dps = 50
>>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
0.38734195032620997271199237593105101319948228874688
>>> -diff(zeta, 2.5)
0.38734195032620997271199237593105101319948228874688
**Divergent series**
The Shanks transformation is able to sum some *divergent*
series. In particular, it is often able to sum Taylor series
beyond their radius of convergence (this is due to a relation
between the Shanks transformation and Pade approximations;
see :func:`~mpmath.pade` for an alternative way to evaluate divergent
Taylor series).
Here we apply it to `\log(1+x)` far outside the region of
convergence::
>>> mp.dps = 50
>>> nsum(lambda k: -(-9)**k/k, [1, inf],
... method='shanks')
2.3025850929940456840179914546843642076011014886288
>>> log(10)
2.3025850929940456840179914546843642076011014886288
A particular type of divergent series that can be summed
using the Shanks transformation is geometric series.
The result is the same as using the closed-form formula
for an infinite geometric series::
>>> mp.dps = 15
>>> for n in range(-8, 8):
... if n == 1:
... continue
... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
... nsum(lambda k: n**k, [0, inf], method='shanks')))
...
-8.0 0.111111111111111 0.111111111111111
-7.0 0.125 0.125
-6.0 0.142857142857143 0.142857142857143
-5.0 0.166666666666667 0.166666666666667
-4.0 0.2 0.2
-3.0 0.25 0.25
-2.0 0.333333333333333 0.333333333333333
-1.0 0.5 0.5
0.0 1.0 1.0
2.0 -1.0 -1.0
3.0 -0.5 -0.5
4.0 -0.333333333333333 -0.333333333333333
5.0 -0.25 -0.25
6.0 -0.2 -0.2
7.0 -0.166666666666667 -0.166666666666667
**Multidimensional sums**
Any combination of finite and infinite ranges is allowed for the
summation indices::
>>> mp.dps = 15
>>> nsum(lambda x,y: x+y, [2,3], [4,5])
28.0
>>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
6.0
>>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
6.0
>>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
7.0
>>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
7.0
>>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
7.0
Some nice examples of double series with analytic solutions or
reductions to single-dimensional series (see [1])::
>>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
1.60669515241529
>>> nsum(lambda n: 1/(2**n-1), [1,inf])
1.60669515241529
>>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
0.278070510848213
>>> pi*(pi-3*ln2)/12
0.278070510848213
>>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
0.129319852864168
>>> altzeta(2) - altzeta(1)
0.129319852864168
>>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
0.0790756439455825
>>> altzeta(3) - altzeta(2)
0.0790756439455825
>>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
... [1,inf], [1,inf])
0.28125
>>> mpf(9)/32
0.28125
>>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
... [1,inf], [1,inf], workprec=400)
1.64493406684823
>>> zeta(2)
1.64493406684823
A hard example of a multidimensional sum is the Madelung constant
in three dimensions (see [2]). The defining sum converges very
slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
obtain an accurate value through convergence acceleration. The
second evaluation below uses a much more efficient, rapidly
convergent 2D sum::
>>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
-1.74756459463318
>>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
-1.74756459463318
Another example of a lattice sum in 2D::
>>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
... [-inf,inf], ignore=True)
-2.1775860903036
>>> -pi*ln2
-2.1775860903036
An example of an Eisenstein series::
>>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
... ignore=True)
(3.1512120021539 + 0.0j)
**References**
1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
"""
infinite, g = standardize(ctx, f, intervals, options)
if not infinite:
return +g()
def update(partial_sums, indices):
if partial_sums:
psum = partial_sums[-1]
else:
psum = ctx.zero
for k in indices:
psum = psum + g(ctx.mpf(k))
partial_sums.append(psum)
prec = ctx.prec
def emfun(point, tol):
workprec = ctx.prec
ctx.prec = prec + 10
v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
ctx.prec = workprec
return v
return +ctx.adaptive_extrapolation(update, emfun, options)
def wrapsafe(f):
def g(*args):
try:
return f(*args)
except (ArithmeticError, ValueError):
return 0
return g
def standardize(ctx, f, intervals, options):
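    # Split the index ranges into finite and infinite dimensions: finite
    # ranges are folded directly into the summand, while infinite ranges
    # are shifted/reflected onto [0, inf] and collapsed to a single index.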
if options.get("ignore"):
f = wrapsafe(f)
finite = []
infinite = []
for k, points in enumerate(intervals):
a, b = ctx._as_points(points)
if b < a:
return False, (lambda: ctx.zero)
if a == ctx.ninf or b == ctx.inf:
infinite.append((k, (a,b)))
else:
finite.append((k, (int(a), int(b))))
if finite:
f = fold_finite(ctx, f, finite)
if not infinite:
return False, lambda: f(*([0]*len(intervals)))
if infinite:
f = standardize_infinite(ctx, f, infinite)
f = fold_infinite(ctx, f, infinite)
args = [0] * len(intervals)
d = infinite[0][0]
def g(k):
args[d] = k
return f(*args)
return True, g
# backwards compatible itertools.product
def cartesian_product(args):
pools = map(tuple, args)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def fold_finite(ctx, f, intervals):
if not intervals:
return f
indices = [v[0] for v in intervals]
points = [v[1] for v in intervals]
ranges = [xrange(a, b+1) for (a,b) in points]
def g(*args):
args = list(args)
s = ctx.zero
for xs in cartesian_product(ranges):
for dim, x in zip(indices, xs):
args[dim] = ctx.mpf(x)
s += f(*args)
return s
#print "Folded finite", indices
return g
# Standardize each interval to [0,inf]
def standardize_infinite(ctx, f, intervals):
if not intervals:
return f
dim, [a,b] = intervals[-1]
if a == ctx.ninf:
if b == ctx.inf:
def g(*args):
args = list(args)
k = args[dim]
if k:
s = f(*args)
args[dim] = -k
s += f(*args)
return s
else:
return f(*args)
else:
def g(*args):
args = list(args)
args[dim] = b - args[dim]
return f(*args)
else:
def g(*args):
args = list(args)
args[dim] += a
return f(*args)
#print "Standardized infinity along dimension", dim, a, b
return standardize_infinite(ctx, g, intervals[:-1])
def fold_infinite(ctx, f, intervals):
if len(intervals) < 2:
return f
dim1 = intervals[-2][0]
dim2 = intervals[-1][0]
# Assume intervals are [0,inf] x [0,inf] x ...
    def g(*args):
        # Collapse the last two [0,inf] indices into one by summing over
        # the L-shaped shell where max(x, y) == n; iterating over n then
        # covers the whole quadrant (expanding squares).
        args = list(args)
        n = int(args[dim1])
        s = ctx.zero
        # Top edge of the shell: y = n, x = 0..n
        args[dim2] = ctx.mpf(n)
        for x in xrange(n+1):
            args[dim1] = ctx.mpf(x)
            s += f(*args)
        # Right edge of the shell: x = n, y = 0..n-1
        args[dim1] = ctx.mpf(n)
        for y in xrange(n):
            args[dim2] = ctx.mpf(y)
            s += f(*args)
        return s
#print "Folded infinite from", len(intervals), "to", (len(intervals)-1)
return fold_infinite(ctx, g, intervals[:-1])
@defun
def nprod(ctx, f, interval, nsum=False, **kwargs):
r"""
Computes the product
.. math ::
P = \prod_{k=a}^b f(k)
where `(a, b)` = *interval*, and where `a = -\infty` and/or
`b = \infty` are allowed.
By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
:func:`~mpmath.nsum`, except applied to the partial products rather than
partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
supported. If ``nsum=True``, the product is instead computed via
:func:`~mpmath.nsum` as
.. math ::
P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).
This is slower, but can sometimes yield better results. It is
also required (and used automatically) when Euler-Maclaurin
summation is requested.
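    For example (a usage sketch)::
        >>> nprod(lambda k: 1 + 1/k**2, [1, inf], nsum=True)  #doctest: +SKIP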
**Examples**
A simple finite product::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> nprod(lambda k: k, [1, 4])
24.0
A large number of infinite products have known exact values,
and can therefore be used as a reference. Most of the following
examples are taken from MathWorld [1].
A few infinite products with simple values are::
>>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
3.141592653589793238462643
>>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
2.0
>>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
0.6666666666666666666666667
>>> nprod(lambda k: (1-1/k**2), [2, inf])
0.5
Next, several more infinite products with more complicated
values::
>>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
5.180668317897115748416626
5.180668317897115748416626
>>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi)
0.2720290549821331629502366
0.2720290549821331629502366
>>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf])
0.8480540493529003921296502
>>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi))
0.8480540493529003921296502
>>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf])
1.848936182858244485224927
>>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi
1.848936182858244485224927
>>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi)
0.9190194775937444301739244
0.9190194775937444301739244
>>> nprod(lambda k: (1-1/k**6), [2, inf])
0.9826842777421925183244759
>>> (1+cosh(pi*sqrt(3)))/(12*pi**2)
0.9826842777421925183244759
>>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi)
1.838038955187488860347849
1.838038955187488860347849
>>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf])
1.447255926890365298959138
>>> exp(1+euler/2)/sqrt(2*pi)
1.447255926890365298959138
The following two products are equivalent and can be evaluated in
terms of a Jacobi theta function. Pi can be replaced by any value
(as long as convergence is preserved)::
>>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf])
0.3838451207481672404778686
>>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf])
0.3838451207481672404778686
>>> jtheta(4,0,1/pi)
0.3838451207481672404778686
This product does not have a known closed form value::
>>> nprod(lambda k: (1-1/2**k), [1, inf])
0.2887880950866024212788997
A product taken from `-\infty`::
>>> nprod(lambda k: 1-k**(-3), [-inf,-2])
0.8093965973662901095786805
>>> cosh(pi*sqrt(3)/2)/(3*pi)
0.8093965973662901095786805
A doubly infinite product::
>>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
23.41432688231864337420035
>>> exp(pi/tanh(pi))
23.41432688231864337420035
A product requiring the use of Euler-Maclaurin summation to compute
an accurate value::
>>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
0.696155111336231052898125
**References**
1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html
"""
if nsum or ('e' in kwargs.get('method', '')):
orig = ctx.prec
try:
# TODO: we are evaluating log(1+eps) -> eps, which is
# inaccurate. This currently works because nsum greatly
# increases the working precision. But we should be
# more intelligent and handle the precision here.
ctx.prec += 10
v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
finally:
ctx.prec = orig
return +ctx.exp(v)
a, b = ctx._as_points(interval)
if a == ctx.ninf:
if b == ctx.inf:
return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
return ctx.nprod(f, [-b, ctx.inf], **kwargs)
elif b != ctx.inf:
return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))
a = int(a)
def update(partial_products, indices):
if partial_products:
pprod = partial_products[-1]
else:
pprod = ctx.one
for k in indices:
pprod = pprod * f(a + ctx.mpf(k))
partial_products.append(pprod)
return +ctx.adaptive_extrapolation(update, None, kwargs)
@defun
def limit(ctx, f, x, direction=1, exp=False, **kwargs):
r"""
Computes an estimate of the limit
.. math ::
\lim_{t \to x} f(t)
where `x` may be finite or infinite.
For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
consecutive integer values of `n`, where the approach direction
`d` may be specified using the *direction* keyword argument.
For infinite `x`, :func:`~mpmath.limit` evaluates values of
`f(\mathrm{sign}(x) \cdot n)`.
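    For instance, the two one-sided limits of the sign function at `0`
    can be selected via *direction* (a usage sketch)::
        >>> limit(sign, 0, direction=1)    #doctest: +SKIP
        >>> limit(sign, 0, direction=-1)   #doctest: +SKIP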
If the approach to the limit is not sufficiently fast to give
an accurate estimate directly, :func:`~mpmath.limit` attempts to find
the limit using Richardson extrapolation or the Shanks
transformation. You can select between these methods using
the *method* keyword (see documentation of :func:`~mpmath.nsum` for
more information).
**Options**
The following options are available with essentially the
same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
*steps*, *verbose*.
If the option *exp=True* is set, `f` will be
sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
This can sometimes improve the rate of convergence so that
    :func:`~mpmath.limit` may return a more accurate answer (and
    return it faster).
However, do note that this can only be used if `f`
supports fast and accurate evaluation for arguments that
are extremely close to the limit point (or if infinite,
very large arguments).
**Examples**
A basic evaluation of a removable singularity::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> limit(lambda x: (x-sin(x))/x**3, 0)
0.166666666666666666666666666667
Computing the exponential function using its limit definition::
>>> limit(lambda n: (1+3/n)**n, inf)
20.0855369231876677409285296546
>>> exp(3)
20.0855369231876677409285296546
A limit for `\pi`::
>>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
>>> limit(f, inf)
3.14159265358979323846264338328
Calculating the coefficient in Stirling's formula::
>>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
2.50662827463100050241576528481
>>> sqrt(2*pi)
2.50662827463100050241576528481
Evaluating Euler's constant `\gamma` using the limit representation
.. math ::
\gamma = \lim_{n \rightarrow \infty } \left[ \left(
\sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]
(which converges notoriously slowly)::
>>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
>>> limit(f, inf)
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
With default settings, the following limit converges too slowly
to be evaluated accurately. Changing to exponential sampling
however gives a perfect result::
>>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
>>> limit(f, inf)
0.992831158558330281129249686491
>>> limit(f, inf, exp=True)
1.0
"""
if ctx.isinf(x):
direction = ctx.sign(x)
g = lambda k: f(ctx.mpf(k+1)*direction)
else:
direction *= ctx.one
g = lambda k: f(x + direction/(k+1))
if exp:
h = g
g = lambda k: h(2**k)
def update(values, indices):
for k in indices:
values.append(g(k+1))
# XXX: steps used by nsum don't work well
    if 'steps' not in kwargs:
kwargs['steps'] = [10]
return +ctx.adaptive_extrapolation(update, None, kwargs)
# ===== end of altanalyze/stats_scripts/mpmath/calculus/extrapolation.py =====
import math
from ..libmp.backend import xrange
class QuadratureRule(object):
"""
Quadrature rules are implemented using this class, in order to
simplify the code and provide a common infrastructure
for tasks such as error estimation and node caching.
You can implement a custom quadrature rule by subclassing
:class:`QuadratureRule` and implementing the appropriate
methods. The subclass can then be used by :func:`~mpmath.quad` by
passing it as the *method* argument.
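    A minimal sketch of such a subclass (``MidpointRule`` is hypothetical,
    shown only to illustrate the interface; a real rule should converge
    much faster as the degree increases for the built-in error estimator
    to work well)::
        >>> class MidpointRule(QuadratureRule):             #doctest: +SKIP
        ...     def calc_nodes(self, degree, prec, verbose=False):
        ...         # 2**degree equispaced midpoints on [-1, 1],
        ...         # each carrying the same weight h
        ...         n = 2**degree
        ...         h = self.ctx.mpf(2)/n
        ...         return [(-1 + (k + self.ctx.mpf(1)/2)*h, h)
        ...                 for k in range(n)]
        ...
        >>> mp.quad(lambda x: x**2, [0, 1], method=MidpointRule)  #doctest: +SKIP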
    :class:`QuadratureRule` instances are supposed to be singletons;
    the context object creates one instance of each rule (see
    :class:`QuadratureMethods` below) and reuses it for all integrations.
"""
def __init__(self, ctx):
self.ctx = ctx
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def clear(self):
"""
Delete cached node data.
"""
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def calc_nodes(self, degree, prec, verbose=False):
r"""
Compute nodes for the standard interval `[-1, 1]`. Subclasses
        should probably implement only this method, and use the
        :func:`~mpmath.get_nodes` method to retrieve the nodes.
"""
raise NotImplementedError
def get_nodes(self, a, b, degree, prec, verbose=False):
"""
Return nodes for given interval, degree and precision. The
nodes are retrieved from a cache if already computed;
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
and are then cached.
Subclasses should probably not implement this method,
but just implement :func:`~mpmath.calc_nodes` for the actual
node computation.
"""
key = (a, b, degree, prec)
if key in self.transformed_cache:
return self.transformed_cache[key]
orig = self.ctx.prec
try:
self.ctx.prec = prec+20
# Get nodes on standard interval
if (degree, prec) in self.standard_cache:
nodes = self.standard_cache[degree, prec]
else:
nodes = self.calc_nodes(degree, prec, verbose)
self.standard_cache[degree, prec] = nodes
# Transform to general interval
nodes = self.transform_nodes(nodes, a, b, verbose)
if key in self.interval_count:
self.transformed_cache[key] = nodes
else:
self.interval_count[key] = True
finally:
self.ctx.prec = orig
return nodes
def transform_nodes(self, nodes, a, b, verbose=False):
r"""
Rescale standardized nodes (for `[-1, 1]`) to a general
interval `[a, b]`. For a finite interval, a simple linear
change of variables is used. Otherwise, the following
transformations are used:
.. math ::
[a, \infty] : t = \frac{1}{x} + (a-1)
[-\infty, b] : t = (b+1) - \frac{1}{x}
[-\infty, \infty] : t = \frac{x}{\sqrt{1-x^2}}
"""
ctx = self.ctx
a = ctx.convert(a)
b = ctx.convert(b)
one = ctx.one
if (a, b) == (-one, one):
return nodes
half = ctx.mpf(0.5)
new_nodes = []
if ctx.isinf(a) or ctx.isinf(b):
if (a, b) == (ctx.ninf, ctx.inf):
p05 = -half
for x, w in nodes:
x2 = x*x
px1 = one-x2
spx1 = px1**p05
x = x*spx1
w *= spx1/px1
new_nodes.append((x, w))
elif a == ctx.ninf:
b1 = b+1
for x, w in nodes:
u = 2/(x+one)
x = b1-u
w *= half*u**2
new_nodes.append((x, w))
elif b == ctx.inf:
a1 = a-1
for x, w in nodes:
u = 2/(x+one)
x = a1+u
w *= half*u**2
new_nodes.append((x, w))
elif a == ctx.inf or b == ctx.ninf:
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
else:
raise NotImplementedError
else:
# Simple linear change of variables
C = (b-a)/2
D = (b+a)/2
for x, w in nodes:
new_nodes.append((D+C*x, C*w))
return new_nodes
def guess_degree(self, prec):
"""
Given a desired precision `p` in bits, estimate the degree `m`
of the quadrature required to accomplish full accuracy for
typical integrals. By default, :func:`~mpmath.quad` will perform up
to `m` iterations. The value of `m` should be a slight
overestimate, so that "slightly bad" integrals can be dealt
with automatically using a few extra iterations. On the
other hand, it should not be too big, so :func:`~mpmath.quad` can
quit within a reasonable amount of time when it is given
an "unsolvable" integral.
The default formula used by :func:`~mpmath.guess_degree` is tuned
for both :class:`TanhSinh` and :class:`GaussLegendre`.
The output is roughly as follows:
+---------+---------+
| `p` | `m` |
+=========+=========+
| 50 | 6 |
+---------+---------+
| 100 | 7 |
+---------+---------+
| 500 | 10 |
+---------+---------+
| 3000 | 12 |
+---------+---------+
This formula is based purely on a limited amount of
experimentation and will sometimes be wrong.
"""
# Expected degree
# XXX: use mag
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
# Reasonable "worst case"
g += 2
return g
def estimate_error(self, results, prec, epsilon):
r"""
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
        with a quadrature rule of degree `1, 2, \ldots, k`, estimate
the error of `I_k`.
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
that each degree increment roughly doubles the accuracy of
the quadrature rule (this is true for both :class:`TanhSinh`
and :class:`GaussLegendre`). The extrapolation formula is given
by Borwein, Bailey & Girgensohn. Although not very conservative,
this method seems to be very robust in practice.
"""
if len(results) == 2:
return abs(results[0]-results[1])
try:
if results[-1] == results[-2] == results[-3]:
return self.ctx.zero
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
except ValueError:
return epsilon
D3 = -prec
D4 = min(0, max(D1**2/D2, 2*D1, D3))
return self.ctx.mpf(10) ** int(D4)
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
"""
Main integration function. Computes the 1D integral over
the interval specified by *points*. For each subinterval,
performs quadrature of degree from 1 up to *max_degree*
until :func:`~mpmath.estimate_error` signals convergence.
:func:`~mpmath.summation` transforms each subintegration to
the standard interval and then calls :func:`~mpmath.sum_next`.
"""
ctx = self.ctx
I = err = ctx.zero
for i in xrange(len(points)-1):
a, b = points[i], points[i+1]
if a == b:
continue
# XXX: we could use a single variable transformation,
# but this is not good in practice. We get better accuracy
# by having 0 as an endpoint.
if (a, b) == (ctx.ninf, ctx.inf):
_f = f
f = lambda x: _f(-x) + _f(x)
a, b = (ctx.zero, ctx.inf)
results = []
for degree in xrange(1, max_degree+1):
nodes = self.get_nodes(a, b, degree, prec, verbose)
if verbose:
print("Integrating from %s to %s (degree %s of %s)" % \
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
if degree > 1:
err = self.estimate_error(results, prec, epsilon)
if err <= epsilon:
break
if verbose:
print("Estimated error:", ctx.nstr(err))
I += results[-1]
if err > epsilon:
if verbose:
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(err))
return I, err
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
r"""
        Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
        contains the `(x_k, w_k)` pairs.
        :func:`~mpmath.summation` will supply the list *previous* of
        values computed by :func:`~mpmath.sum_next` at lower degrees, in
        case the quadrature rule is able to reuse them.
"""
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
class TanhSinh(QuadratureRule):
r"""
This class implements "tanh-sinh" or "doubly exponential"
quadrature. This quadrature rule is based on the Euler-Maclaurin
integral formula. By performing a change of variables involving
nested exponentials / hyperbolic functions (hence the name), the
derivatives at the endpoints vanish rapidly. Since the error term
in the Euler-Maclaurin formula depends on the derivatives at the
endpoints, a simple step sum becomes extremely accurate. In
practice, this means that doubling the number of evaluation
points roughly doubles the number of accurate digits.
Comparison to Gauss-Legendre:
* Initial computation of nodes is usually faster
* Handles endpoint singularities better
* Handles infinite integration intervals better
* Is slower for smooth integrands once nodes have been computed
The implementation of the tanh-sinh algorithm is based on the
description given in Borwein, Bailey & Girgensohn, "Experimentation
in Mathematics - Computational Paths to Discovery", A K Peters,
2003, pages 312-313. In the present implementation, a few
improvements have been made:
* A more efficient scheme is used to compute nodes (exploiting
recurrence for the exponential function)
* The nodes are computed successively instead of all at once
Various documents describing the algorithm are available online, e.g.:
* http://crd.lbl.gov/~dhbailey/dhbpapers/dhb-tanh-sinh.pdf
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
"""
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
"""
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
fact that half of the abscissas at degree `m` are precisely the
abscissas from degree `m-1`. Thus reusing the result from
the previous level allows a 2x speedup.
"""
h = self.ctx.mpf(2)**(-degree)
# Abscissas overlap, so reusing saves half of the time
if previous:
S = previous[-1]/(h*2)
else:
S = self.ctx.zero
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
return h*S
def calc_nodes(self, degree, prec, verbose=False):
r"""
The abscissas and weights for tanh-sinh quadrature of degree
`m` are given by
.. math::
x_k = \tanh(\pi/2 \sinh(t_k))
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
list of nodes is actually infinite, but the weights die off so
rapidly that only a few are needed.
"""
ctx = self.ctx
nodes = []
extra = 20
ctx.prec += extra
tol = ctx.ldexp(1, -prec-10)
pi4 = ctx.pi/4
# For simplicity, we work in steps h = 1/2^n, with the first point
# offset so that we can reuse the sum from the previous degree
# We define degree 1 to include the "degree 0" steps, including
# the point x = 0. (It doesn't work well otherwise; not sure why.)
t0 = ctx.ldexp(1, -degree)
if degree == 1:
#nodes.append((mpf(0), pi4))
#nodes.append((-mpf(0), pi4))
nodes.append((ctx.zero, ctx.pi/2))
h = t0
else:
h = t0*2
# Since h is fixed, we can compute the next exponential
# by simply multiplying by exp(h)
expt0 = ctx.exp(t0)
a = pi4 * expt0
b = pi4 / expt0
udelta = ctx.exp(h)
urdelta = 1/udelta
for k in xrange(0, 20*2**degree+1):
# Reference implementation:
# t = t0 + k*h
# x = tanh(pi/2 * sinh(t))
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
c = ctx.exp(a-b)
d = 1/c
co = (c+d)/2
si = (c-d)/2
x = si / co
w = (a+b) / co**2
diff = abs(x-1)
if diff <= tol:
break
nodes.append((x, w))
nodes.append((-x, w))
a *= udelta
b *= urdelta
if verbose and k % 300 == 150:
# Note: the number displayed is rather arbitrary. Should
# figure out how to print something that looks more like a
# percentage
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
ctx.prec -= extra
return nodes
class GaussLegendre(QuadratureRule):
"""
This class implements Gauss-Legendre quadrature, which is
exceptionally efficient for polynomials and polynomial-like (i.e.
very smooth) integrands.
The abscissas and weights are given by roots and values of
Legendre polynomials, which are the orthogonal polynomials
on `[-1, 1]` with respect to the unit weight
(see :func:`~mpmath.legendre`).
In this implementation, we take the "degree" `m` of the quadrature
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
than linear, convergence as the degree is incremented.
Comparison to tanh-sinh quadrature:
* Is faster for smooth integrands once nodes have been computed
* Initial computation of nodes is usually slower
* Handles endpoint singularities worse
* Handles infinite integration intervals worse
"""
def calc_nodes(self, degree, prec, verbose=False):
"""
Calculates the abscissas and weights for Gauss-Legendre
        quadrature of given degree `m` (actually of degree `3 \cdot 2^m`).
"""
ctx = self.ctx
# It is important that the epsilon is set lower than the
# "real" epsilon
epsilon = ctx.ldexp(1, -prec-8)
# Fairly high precision might be required for accurate
# evaluation of the roots
orig = ctx.prec
ctx.prec = int(prec*1.5)
if degree == 1:
x = ctx.mpf(3)/5
w = ctx.mpf(5)/9
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
ctx.prec = orig
return nodes
nodes = []
n = 3*2**(degree-1)
upto = n//2 + 1
for j in xrange(1, upto):
# Asymptotic formula for the roots
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
# Newton iteration
            while 1:
                t1, t2 = 1, 0
                # Evaluates the Legendre polynomial P_n(r) using its
                # defining three-term recurrence relation
                for j1 in xrange(1,n+1):
                    t1, t2 = ((2*j1-1)*r*t1 - (j1-1)*t2)/j1, t1
                # Newton step: t4 is the derivative of P_n at r
                t4 = n*(r*t1 - t2)/(r**2 - 1)
                a = t1/t4
                r = r - a
if abs(a) < epsilon:
break
x = r
w = 2/((1-r**2)*t4**2)
if verbose and j % 30 == 15:
print("Computing nodes (%i of %i)" % (j, upto))
nodes.append((x, w))
nodes.append((-x, w))
ctx.prec = orig
return nodes
class QuadratureMethods:
def __init__(ctx, *args, **kwargs):
ctx._gauss_legendre = GaussLegendre(ctx)
ctx._tanh_sinh = TanhSinh(ctx)
def quad(ctx, f, *points, **kwargs):
r"""
Computes a single, double or triple integral over a given
1D interval, 2D rectangle, or 3D cuboid. A basic example::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(sin, [0, pi])
2.0
A basic 2D integral::
>>> f = lambda x, y: cos(x+y/2)
>>> quad(f, [-pi/2, pi/2], [0, pi])
4.0
**Interval format**
The integration range for each dimension may be specified
using a list or tuple. Arguments are interpreted as follows:
``quad(f, [x1, x2])`` -- calculates
`\int_{x_1}^{x_2} f(x) \, dx`
``quad(f, [x1, x2], [y1, y2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
\, dz \, dy \, dx`
Endpoints may be finite or infinite. An interval descriptor
may also contain more than two points. In this
case, the integration is split into subintervals, between
each pair of consecutive points. This is useful for
dealing with mid-interval discontinuities, or integrating
over large intervals where the function is irregular or
oscillates.
**Options**
:func:`~mpmath.quad` recognizes the following keyword arguments:
*method*
Chooses integration algorithm (described below).
*error*
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
integral and `e` is the estimated error.
*maxdegree*
Maximum degree of the quadrature rule to try before
quitting.
*verbose*
Print details about progress.
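        For example, both the value and an error estimate can be requested
        (a usage sketch)::
            >>> v, e = quad(sin, [0, pi], error=True)   #doctest: +SKIP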
**Algorithms**
Mpmath presently implements two integration algorithms: tanh-sinh
quadrature and Gauss-Legendre quadrature. These can be selected
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
as shortcuts.
Both algorithms have the property that doubling the number of
evaluation points roughly doubles the accuracy, so both are ideal
for high precision quadrature (hundreds or thousands of digits).
At high precision, computing the nodes and weights for the
integration can be expensive (more expensive than computing the
function values). To make repeated integrations fast, nodes
are automatically cached.
The advantages of the tanh-sinh algorithm are that it tends to
handle endpoint singularities well, and that the nodes are cheap
to compute on the first run. For these reasons, it is used by
:func:`~mpmath.quad` as the default algorithm.
Gauss-Legendre quadrature often requires fewer function
evaluations, and is therefore often faster for repeated use, but
the algorithm does not handle endpoint singularities as well and
the nodes are more expensive to compute. Gauss-Legendre quadrature
can be a better choice if the integrand is smooth and repeated
integrations are required (e.g. for multiple integrals).
See the documentation for :class:`TanhSinh` and
:class:`GaussLegendre` for additional details.
**Examples of 1D integrals**
Intervals may be infinite or half-infinite. The following two
examples evaluate the limits of the inverse tangent function
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
        `\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
>>> mp.dps = 15
>>> quad(lambda x: 2/(x**2+1), [0, inf])
3.14159265358979
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
3.14159265358979
Integrals can typically be resolved to high precision.
The following computes 50 digits of `\pi` by integrating the
area of the half-circle defined by `x^2 + y^2 \le 1`,
`-1 \le x \le 1`, `y \ge 0`::
>>> mp.dps = 50
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
3.1415926535897932384626433832795028841971693993751
One can just as well compute 1000 digits (output truncated)::
>>> mp.dps = 1000
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
3.141592653589793238462643383279502884...216420198
Complex integrals are supported. The following computes
a residue at `z = 0` by integrating counterclockwise along the
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
>>> mp.dps = 15
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
(0.0 + 6.28318530717959j)
**Examples of 2D and 3D integrals**
Here are several nice examples of analytically solvable
2D integrals (taken from MathWorld [1]) that can be evaluated
to high precision fairly rapidly by :func:`~mpmath.quad`::
>>> mp.dps = 30
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
>>> quad(f, [0, 1], [0, 1])
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
>>> quad(f, [-1, 1], [-1, 1])
3.17343648530607134219175646705
>>> 4*log(2+sqrt(3))-2*pi/3
3.17343648530607134219175646705
>>> f = lambda x, y: 1/(1-x**2 * y**2)
>>> quad(f, [0, 1], [0, 1])
1.23370055013616982735431137498
>>> pi**2 / 8
1.23370055013616982735431137498
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
1.64493406684822643647241516665
>>> pi**2 / 6
1.64493406684822643647241516665
Multiple integrals may be done over infinite ranges::
>>> mp.dps = 15
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
0.367879441171442
>>> print(1/e)
0.367879441171442
        For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
        For example, we can replicate the earlier example of calculating
        `\pi`, this time by using double quadrature to directly measure
        the area of the unit circle::
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
>>> quad(f, [-1, 1])
3.14159265358979
Here is a simple triple integral::
>>> mp.dps = 15
>>> f = lambda x,y,z: x*y/(1+z)
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
0.101366277027041
>>> (log(3)-log(2))/4
0.101366277027041
**Singularities**
Both tanh-sinh and Gauss-Legendre quadrature are designed to
integrate smooth (infinitely differentiable) functions. Neither
algorithm copes well with mid-interval singularities (such as
mid-interval discontinuities in `f(x)` or `f'(x)`).
The best solution is to split the integral into parts::
>>> mp.dps = 15
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
3.99900894176779
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
4.0
The tanh-sinh rule often works well for integrands having a
singularity at one or both endpoints::
>>> mp.dps = 15
>>> quad(log, [0, 1], method='tanh-sinh') # Good
-1.0
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
-0.999932197413801
However, the result may still be inaccurate for some functions::
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
1.99999999946942
This problem is not due to the quadrature rule per se, but to
numerical amplification of errors in the nodes. The problem can be
circumvented by temporarily increasing the precision::
>>> mp.dps = 30
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
>>> mp.dps = 15
>>> +a
2.0
**Highly variable functions**
For functions that are smooth (in the sense of being infinitely
differentiable) but contain sharp mid-interval peaks or many
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
example, with default settings, :func:`~mpmath.quad` is able to integrate
`\sin(x)` accurately over an interval of length 100 but not over
length 1000::
>>> quad(sin, [0, 100]); 1-cos(100) # Good
0.137681127712316
0.137681127712316
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
-37.8587612408485
0.437620923709297
One solution is to break the integration into 10 intervals of
length 100::
>>> quad(sin, linspace(0, 1000, 10)) # Good
0.437620923709297
Another is to increase the degree of the quadrature::
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
0.437620923709297
Whether splitting the interval or increasing the degree is
more efficient differs from case to case. Another example is the
function `1/(1+x^2)`, which has a sharp peak centered around
`x = 0`::
>>> f = lambda x: 1/(1+x**2)
>>> quad(f, [-100, 100]) # Bad
3.64804647105268
>>> quad(f, [-100, 100], maxdegree=10) # Good
3.12159332021646
>>> quad(f, [-100, 0, 100]) # Also good
3.12159332021646
**References**
1. http://mathworld.wolfram.com/DoubleIntegral.html
"""
rule = kwargs.get('method', 'tanh-sinh')
if type(rule) is str:
if rule == 'tanh-sinh':
rule = ctx._tanh_sinh
elif rule == 'gauss-legendre':
rule = ctx._gauss_legendre
else:
raise ValueError("unknown quadrature rule: %s" % rule)
else:
rule = rule(ctx)
verbose = kwargs.get('verbose')
dim = len(points)
orig = prec = ctx.prec
epsilon = ctx.eps/8
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
points = [ctx._as_points(p) for p in points]
try:
ctx.prec += 20
if dim == 1:
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
elif dim == 2:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: f(x,y), \
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
elif dim == 3:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: \
rule.summation(lambda z: f(x,y,z), \
points[2], prec, epsilon, m)[0],
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
else:
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
finally:
ctx.prec = orig
if kwargs.get("error"):
return +v, err
return +v
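# Editor's hedged usage sketch (not part of mpmath): combining the ``method``
# and ``error`` keywords handled above; ``error=True`` makes quad return a
# (value, error-estimate) pair. The demo name is illustrative only.
def _quad_keywords_demo():
    from mpmath import mp, exp, inf
    mp.dps = 15
    val, err = mp.quad(lambda x: exp(-x**2), [0, inf],
                       method='tanh-sinh', error=True)
    return val, err  # val ~ sqrt(pi)/2 ~ 0.886226925452758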
def quadts(ctx, *args, **kwargs):
"""
Performs tanh-sinh quadrature. The call
quadts(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=TanhSinh)
For example, a single integral and a double integral:
quadts(lambda x: exp(cos(x)), [0, 1])
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'tanh-sinh'
return ctx.quad(*args, **kwargs)
def quadgl(ctx, *args, **kwargs):
"""
Performs Gauss-Legendre quadrature. The call
quadgl(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=GaussLegendre)
For example, a single integral and a double integral:
quadgl(lambda x: exp(cos(x)), [0, 1])
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for GaussLegendre for algorithmic information about
Gauss-Legendre quadrature.
"""
kwargs['method'] = 'gauss-legendre'
return ctx.quad(*args, **kwargs)
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
r"""
Calculates
.. math ::
I = \int_a^b f(x) dx
where at least one of `a` and `b` is infinite and where
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
can also handle oscillatory integrals where the oscillation
rate is different from a pure sine or cosine wave.
In the standard case when `|a| < \infty, b = \infty`,
:func:`~mpmath.quadosc` works by evaluating the infinite series
.. math ::
I = \int_a^{x_1} f(x) dx +
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
where `x_k` are consecutive zeros (alternatively
some other periodic reference point) of `f(x)`.
Accordingly, :func:`~mpmath.quadosc` requires information about the
zeros of `f(x)`. For a periodic function, you can specify
the zeros by either providing the angular frequency `\omega`
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
specify the `n`-th zero by providing the *zeros* argument.
Below is an example of each::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: sin(3*x)/(x**2+1)
>>> quadosc(f, [0,inf], omega=3)
0.37833007080198
>>> quadosc(f, [0,inf], period=2*pi/3)
0.37833007080198
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
0.37833007080198
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
0.37833007080198
Note that *zeros* was specified to multiply `n` by the
*half-period*, not the full period. In theory, it does not matter
whether each partial integral is done over a half period or a full
period. However, if done over half-periods, the infinite series
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
typically makes the extrapolation much more efficient.
Here is an example of an integration over the entire real line,
and a half-infinite integration starting at `-\infty`::
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
1.15572734979092
>>> pi/e
1.15572734979092
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
-0.0844109505595739
>>> cos(1)+si(1)-pi/2
-0.0844109505595738
Of course, the integrand may contain a complex exponential just as
well as a real sine or cosine::
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
(0.156410688228254 + 0.0j)
>>> pi/e**3
0.156410688228254
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
(0.00317486988463794 - 0.0447701735209082j)
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
(0.00317486988463794 - 0.0447701735209082j)
**Non-periodic functions**
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
strictly periodic, *omega* or *period* might not work, and it might
be necessary to use *zeros*.
A notable exception can be made for Bessel functions which, though not
periodic, are "asymptotically periodic" in a sufficiently strong sense
that the sum extrapolation will work out::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
More properly, one should provide the exact Bessel function zeros::
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
>>> quadosc(j0, [0, inf], zeros=j0zero)
1.0
For an example where *zeros* becomes necessary, consider the
complete Fresnel integrals
.. math ::
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
= \sqrt{\frac{\pi}{8}}.
Although the integrands do not decrease in magnitude as
`x \to \infty`, the integrals are convergent since the oscillation
rate increases (causing consecutive periods to asymptotically
cancel out). These integrals are virtually impossible to calculate
to any kind of accuracy using standard quadrature rules. However,
if one provides the correct asymptotic distribution of zeros
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
>>> mp.dps = 30
>>> f = lambda x: cos(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> f = lambda x: sin(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> sqrt(pi/8)
0.626657068657750125603941321203
(Interestingly, these integrals can still be evaluated if one
uses some constant other than `\pi` under the square root.)
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
the inverse-function distribution `h^{-1}(x)`::
>>> mp.dps = 15
>>> f = lambda x: sin(exp(x))
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
-0.25024394235267
>>> pi/2-si(e)
-0.250243942352671
**Non-alternating functions**
If the integrand oscillates around a positive value, without
alternating signs, the extrapolation might fail. A simple trick
that sometimes works is to multiply or divide the frequency by 2::
>>> f = lambda x: 1/x**2+sin(x)/x**4
>>> quadosc(f, [1,inf], omega=1) # Bad
1.28642190869861
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
1.28652953559617
>>> 1+(cos(1)+ci(1)+sin(1))/6
1.28652953559617
**Fast decay**
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
integrands. If the integrand decreases exponentially or faster,
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
much faster than :func:`~mpmath.quadosc`)::
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
0.5
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
0.5
"""
a, b = ctx._as_points(interval)
a = ctx.convert(a)
b = ctx.convert(b)
if [omega, period, zeros].count(None) != 2:
raise ValueError( \
"must specify exactly one of omega, period, zeros")
if a == ctx.ninf and b == ctx.inf:
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
return s1 + s2
if a == ctx.ninf:
if zeros:
return ctx.quadosc(lambda x:f(-x), [-b,-a], zeros=lambda n: zeros(-n))
else:
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
if b != ctx.inf:
raise ValueError("quadosc requires an infinite integration interval")
if not zeros:
if omega:
period = 2*ctx.pi/omega
zeros = lambda n: n*period/2
#for n in range(1,10):
# p = zeros(n)
# if p > a:
# break
#if n >= 9:
# raise ValueError("zeros do not appear to be correctly indexed")
n = 1
s = ctx.quadgl(f, [a, zeros(n)])
def term(k):
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
s += ctx.nsum(term, [n, ctx.inf])
return s
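# Editor's hedged sketch (not part of mpmath): the decomposition quadosc
# performs above, written out by hand for sin(3x)/(x^2+1) with zeros at
# pi*n/3, matching the docstring example.
def _quadosc_by_hand_demo():
    from mpmath import mp, sin, pi, inf
    f = lambda x: sin(3*x)/(x**2+1)
    zeros = lambda n: pi*n/3
    s = mp.quadgl(f, [0, zeros(1)])  # head piece [a, x_1]
    s += mp.nsum(lambda k: mp.quadgl(f, [zeros(k), zeros(k+1)]), [1, inf])
    return s  # ~0.37833007080198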
if __name__ == '__main__':
import doctest
doctest.testmod()
AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/calculus/quadrature.py | quadrature.py
from ..libmp.backend import xrange
from .calculus import defun
try:
iteritems = dict.iteritems
except AttributeError:
iteritems = dict.items
#----------------------------------------------------------------------------#
# Differentiation #
#----------------------------------------------------------------------------#
@defun
def difference(ctx, s, n):
r"""
Given a sequence `(s_k)` containing at least `n+1` items, returns the
`n`-th forward difference,
.. math ::
\Delta^n = \sum_{k=0}^{n} (-1)^{k+n} {n \choose k} s_k.
"""
n = int(n)
d = ctx.zero
b = (-1) ** (n & 1)
for k in xrange(n+1):
d += b * s[k]
b = (b * (k-n)) // (k+1)
return d
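# Editor's hedged check (not part of mpmath): the n-th forward difference of
# the sequence s_k = k**n equals n!, so the third difference of [0, 1, 8, 27]
# should be 6.
def _difference_demo():
    from mpmath import mp
    return mp.difference([k**3 for k in range(4)], 3)  # -> 6.0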
def hsteps(ctx, f, x, n, prec, **options):
singular = options.get('singular')
addprec = options.get('addprec', 10)
direction = options.get('direction', 0)
workprec = (prec+2*addprec) * (n+1)
orig = ctx.prec
try:
ctx.prec = workprec
h = options.get('h')
if h is None:
if options.get('relative'):
hextramag = int(ctx.mag(x))
else:
hextramag = 0
h = ctx.ldexp(1, -prec-addprec-hextramag)
else:
h = ctx.convert(h)
# Directed: steps x, x+h, ... x+n*h
if direction:
h *= ctx.sign(direction)
steps = xrange(n+1)
norm = h
# Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
else:
steps = xrange(-n, n+1, 2)
norm = (2*h)
# Perturb
if singular:
x += 0.5*h
values = [f(x+k*h) for k in steps]
return values, norm, workprec
finally:
ctx.prec = orig
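# Editor's hedged sketch (not part of mpmath): with direction=0 and n=1,
# hsteps samples f at x-h and x+h and returns norm=2h, so the first central
# difference reproduces f'(x).
def _hsteps_demo():
    from mpmath import mp
    values, norm, workprec = hsteps(mp, lambda t: t*t, mp.mpf(1), 1, mp.prec)
    return (values[1] - values[0]) / norm  # ~2.0, the derivative of t^2 at 1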
@defun
def diff(ctx, f, x, n=1, **options):
r"""
Numerically computes the derivative of `f`, `f'(x)`, or generally for
an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
A few basic examples are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> diff(lambda x: x**2 + x, 1.0)
3.0
>>> diff(lambda x: x**2 + x, 1.0, 2)
2.0
>>> diff(lambda x: x**2 + x, 1.0, 3)
0.0
>>> nprint([diff(exp, 3, n) for n in range(5)]) # exp'(x) = exp(x)
[20.0855, 20.0855, 20.0855, 20.0855, 20.0855]
Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
and order `(n_1, \ldots, n_k)`, the partial derivative
`f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::
>>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
2.75
>>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
3.0
**Options**
The following optional keyword arguments are recognized:
``method``
Supported methods are ``'step'`` or ``'quad'``: derivatives may be
computed using either a finite difference with a small step
size `h` (default), or numerical quadrature.
``direction``
Direction of finite difference: can be -1 for a left
difference, 0 for a central difference (default), or +1
for a right difference; more generally can be any complex number.
``addprec``
Extra precision for `h` used to account for the function's
sensitivity to perturbations (default = 10).
``relative``
Choose `h` relative to the magnitude of `x`, rather than an
absolute value; useful for large or tiny `x` (default = False).
``h``
As an alternative to ``addprec`` and ``relative``, manually
select the step size `h`.
``singular``
If True, evaluation exactly at the point `x` is avoided; this is
useful for differentiating functions with removable singularities.
Default = False.
``radius``
Radius of integration contour (with ``method = 'quad'``).
Default = 0.25. A larger radius typically is faster and more
accurate, but it must be chosen so that `f` has no
singularities within the radius from the evaluation point.
A finite difference requires `n+1` function evaluations and must be
performed at `(n+1)` times the target precision. Accordingly, `f` must
support fast evaluation at high precision.
With integration, a larger number of function evaluations is
needed, but little extra precision is required. For high-order
derivatives, this method may thus be faster if `f` is very expensive to
evaluate at high precision.
**Further examples**
The direction option is useful for computing left- or right-sided
derivatives of nonsmooth functions::
>>> diff(abs, 0, direction=0)
0.0
>>> diff(abs, 0, direction=1)
1.0
>>> diff(abs, 0, direction=-1)
-1.0
More generally, if the direction is nonzero, a right difference
is computed where the step size is multiplied by sign(direction).
For example, with direction=+j, the derivative from the positive
imaginary direction will be computed::
>>> diff(abs, 0, direction=j)
(0.0 - 1.0j)
With integration, the result may have a small imaginary part
even if the exact result is purely real::
>>> diff(sqrt, 1, method='quad') # doctest:+ELLIPSIS
(0.5 - 4.59...e-26j)
>>> chop(_)
0.5
Adding precision to obtain an accurate value::
>>> diff(cos, 1e-30)
0.0
>>> diff(cos, 1e-30, h=0.0001)
-9.99999998328279e-31
>>> diff(cos, 1e-30, addprec=100)
-1.0e-30
"""
partial = False
try:
orders = list(n)
x = list(x)
partial = True
except TypeError:
pass
if partial:
x = [ctx.convert(_) for _ in x]
return _partial_diff(ctx, f, x, orders, options)
method = options.get('method', 'step')
if n == 0 and method != 'quad' and not options.get('singular'):
return f(ctx.convert(x))
prec = ctx.prec
try:
if method == 'step':
values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
ctx.prec = workprec
v = ctx.difference(values, n) / norm**n
elif method == 'quad':
ctx.prec += 10
radius = ctx.convert(options.get('radius', 0.25))
def g(t):
rei = radius*ctx.expj(t)
z = x + rei
return f(z) / rei**n
d = ctx.quadts(g, [0, 2*ctx.pi])
v = d * ctx.factorial(n) / (2*ctx.pi)
else:
raise ValueError("unknown method: %r" % method)
finally:
ctx.prec = prec
return +v
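# Editor's hedged sketch (not part of mpmath): method='quad' evaluates the
# Cauchy integral coded above, f^(n)(x) = n!/(2*pi) * quadts of
# f(x+r*exp(j*t))/(r*exp(j*t))**n over t in [0, 2*pi]; chop() removes the
# tiny spurious imaginary part, as in the docstring example.
def _diff_quad_demo():
    from mpmath import mp, chop, sqrt
    return chop(mp.diff(sqrt, 1, method='quad', radius=0.25))  # ~0.5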
def _partial_diff(ctx, f, xs, orders, options):
if not orders:
return f()
if not sum(orders):
return f(*xs)
i = 0
for i in range(len(orders)):
if orders[i]:
break
order = orders[i]
def fdiff_inner(*f_args):
def inner(t):
return f(*(f_args[:i] + (t,) + f_args[i+1:]))
return ctx.diff(inner, f_args[i], order, **options)
orders[i] = 0
return _partial_diff(ctx, fdiff_inner, xs, orders, options)
@defun
def diffs(ctx, f, x, n=None, **options):
r"""
Returns a generator that yields the sequence of derivatives
.. math ::
f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots
With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
function evaluations to generate the first `k` derivatives,
rather than the roughly `O(k^2)` evaluations
required if one calls :func:`~mpmath.diff` `k` separate times.
With `n < \infty`, the generator stops as soon as the
`n`-th derivative has been generated. If the exact number of
needed derivatives is known in advance, this is slightly
more efficient.
Options are the same as for :func:`~mpmath.diff`.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15
>>> nprint(list(diffs(cos, 1, 5)))
[0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
>>> for i, d in zip(range(6), diffs(cos, 1)):
... print("%s %s" % (i, d))
...
0 0.54030230586814
1 -0.841470984807897
2 -0.54030230586814
3 0.841470984807897
4 0.54030230586814
5 -0.841470984807897
"""
if n is None:
n = ctx.inf
else:
n = int(n)
if options.get('method', 'step') != 'step':
k = 0
while k < n:
yield ctx.diff(f, x, k, **options)
k += 1
return
singular = options.get('singular')
if singular:
yield ctx.diff(f, x, 0, singular=True)
else:
yield f(ctx.convert(x))
if n < 1:
return
if n == ctx.inf:
A, B = 1, 2
else:
A, B = 1, n+1
while 1:
callprec = ctx.prec
y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
for k in xrange(A, B):
try:
ctx.prec = workprec
d = ctx.difference(y, k) / norm**k
finally:
ctx.prec = callprec
yield +d
if k >= n:
return
A, B = B, int(A*1.4+1)
B = min(B, n)
def iterable_to_function(gen):
gen = iter(gen)
data = []
def f(k):
for i in xrange(len(data), k+1):
data.append(next(gen))
return data[k]
return f
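# Editor's hedged demo (not part of mpmath): iterable_to_function caches the
# generator's output, so indices can be requested repeatedly and out of order.
def _iterable_to_function_demo():
    f = iterable_to_function(iter([10, 20, 30]))
    return f(2), f(0)  # (30, 10)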
@defun
def diffs_prod(ctx, factors):
r"""
Given a list of `N` iterables or generators yielding
`f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
generate `g(x), g'(x), g''(x), \ldots` where
`g(x) = f_1(x) f_2(x) \cdots f_N(x)`.
At high precision and for large orders, this is typically more efficient
than numerical differentiation if the derivatives of each `f_k(x)`
admit direct computation.
Note: This function does not increase the working precision internally,
so guard digits may have to be added externally for full accuracy.
**Examples**
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: exp(x)*cos(x)*sin(x)
>>> u = diffs(f, 1)
>>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
>>> next(u); next(v)
1.23586333600241
1.23586333600241
>>> next(u); next(v)
0.104658952245596
0.104658952245596
>>> next(u); next(v)
-5.96999877552086
-5.96999877552086
>>> next(u); next(v)
-12.4632923122697
-12.4632923122697
"""
N = len(factors)
if N == 1:
for c in factors[0]:
yield c
else:
u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
n = 0
while 1:
#yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
s = u(n) * v(0)
a = 1
for k in xrange(1,n+1):
a = a * (n-k+1) // k
s += a * u(n-k) * v(k)
yield s
n += 1
def dpoly(n, _cache={}):
"""
nth differentiation polynomial for exp (Faa di Bruno's formula).
TODO: most exponents are zero, so maybe a sparse representation
would be better.
"""
if n in _cache:
return _cache[n]
if not _cache:
_cache[0] = {(0,):1}
R = dpoly(n-1)
R = dict((c+(0,),v) for (c,v) in iteritems(R))
Ra = {}
for powers, count in iteritems(R):
powers1 = (powers[0]+1,) + powers[1:]
if powers1 in Ra:
Ra[powers1] += count
else:
Ra[powers1] = count
for powers, count in iteritems(R):
if not sum(powers):
continue
for k,p in enumerate(powers):
if p:
powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:]
if powers2 in Ra:
Ra[powers2] += p*count
else:
Ra[powers2] = p*count
_cache[n] = Ra
return _cache[n]
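# Editor's hedged check (not part of mpmath): dpoly(n) maps tuples of
# exponents of f', f'', ... to coefficients. Since (exp(f))'' =
# ((f')**2 + f'')*exp(f), dpoly(2) should be {(2, 0, 0): 1, (0, 1, 0): 1}
# (trailing zeros arise from the construction above).
def _dpoly_demo():
    return dpoly(2)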
@defun
def diffs_exp(ctx, fdiffs):
r"""
Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.
At high precision and for large orders, this is typically more efficient
than numerical differentiation if the derivatives of `f(x)`
admit direct computation.
Note: This function does not increase the working precision internally,
so guard digits may have to be added externally for full accuracy.
**Examples**
The derivatives of the gamma function can be computed using
logarithmic differentiation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>>
>>> def diffs_loggamma(x):
... yield loggamma(x)
... i = 0
... while 1:
... yield psi(i,x)
... i += 1
...
>>> u = diffs_exp(diffs_loggamma(3))
>>> v = diffs(gamma, 3)
>>> next(u); next(v)
2.0
2.0
>>> next(u); next(v)
1.84556867019693
1.84556867019693
>>> next(u); next(v)
2.49292999190269
2.49292999190269
>>> next(u); next(v)
3.44996501352367
3.44996501352367
"""
fn = iterable_to_function(fdiffs)
f0 = ctx.exp(fn(0))
yield f0
i = 1
while 1:
s = ctx.mpf(0)
for powers, c in iteritems(dpoly(i)):
s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p)
yield s * f0
i += 1
@defun
def differint(ctx, f, x, n=1, x0=0):
r"""
Calculates the Riemann-Liouville differintegral, or fractional
derivative, defined by
.. math ::
\,_{x_0}{\mathbb{D}}^n_x f(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
\int_{x_0}^{x}(x-t)^{m-n-1}f(t)\,dt
where `f` is a given (presumably well-behaved) function,
`x` is the evaluation point, `n` is the order, and `x_0` is
the reference point of integration (`m` is an arbitrary
parameter selected automatically).
With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
the second derivative `f''(x)`, etc. With `n = -1`, it gives
`\int_{x_0}^x f(t) dt`, with `n = -2`
it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.
As `n` is permitted to be any number, this operator generalizes
iterated differentiation and iterated integration to a single
operator with a continuous order parameter.
**Examples**
There is an exact formula for the fractional derivative of a
monomial `x^p`, which may be used as a reference. For example,
the following gives a half-derivative (order 0.5)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> x = mpf(3); p = 2; n = 0.5
>>> differint(lambda t: t**p, x, n)
7.81764019044672
>>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
7.81764019044672
Another useful test function is the exponential function, whose
integration / differentiation formula easily generalizes
to arbitrary order. Here we first compute a third derivative,
and then a triply nested integral. (The reference point `x_0`
is set to `-\infty` to avoid nonzero endpoint terms.)::
>>> differint(lambda x: exp(pi*x), -1.5, 3)
0.278538406900792
>>> exp(pi*-1.5) * pi**3
0.278538406900792
>>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
1922.50563031149
>>> exp(pi*3.5) / pi**3
1922.50563031149
However, for noninteger `n`, the differentiation formula for the
exponential function must be modified to give the same result as the
Riemann-Liouville differintegral::
>>> x = mpf(3.5)
>>> c = pi
>>> n = 1+2*j
>>> differint(lambda x: exp(c*x), x, n)
(-123295.005390743 + 140955.117867654j)
>>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
(-123295.005390743 + 140955.117867654j)
"""
m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
r = m-n-1
g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
return ctx.diff(g, x, m) / ctx.gamma(m-n)
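# Editor's hedged sketch (not part of mpmath): checking differint against the
# closed-form fractional derivative of a monomial quoted in the docstring.
def _differint_demo():
    from mpmath import mp, gamma
    x, p, n = mp.mpf(2), 3, mp.mpf(1.5)
    lhs = mp.differint(lambda t: t**p, x, n)
    rhs = gamma(p+1)/gamma(p-n+1) * x**(p-n)
    return lhs, rhs  # the two values should agree to working precision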
@defun
def diffun(ctx, f, n=1, **options):
r"""
Given a function `f`, returns a function `g(x)` that evaluates the nth
derivative `f^{(n)}(x)`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> cos2 = diffun(sin)
>>> sin2 = diffun(sin, 4)
>>> cos(1.3), cos2(1.3)
(0.267498828624587, 0.267498828624587)
>>> sin(1.3), sin2(1.3)
(0.963558185417193, 0.963558185417193)
The function `f` must support arbitrary precision evaluation.
See :func:`~mpmath.diff` for additional details and supported
keyword options.
"""
if n == 0:
return f
def g(x):
return ctx.diff(f, x, n, **options)
return g
@defun
def taylor(ctx, f, x, n, **options):
r"""
Produces a degree-`n` Taylor polynomial around the point `x` of the
given function `f`. The coefficients are returned as a list.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(chop(taylor(sin, 0, 5)))
[0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]
The coefficients are computed using high-order numerical
differentiation. It must be possible to evaluate the function
to arbitrary precision. See :func:`~mpmath.diff` for additional details
and supported keyword options.
Note that to evaluate the Taylor polynomial as an approximation
of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
and the point of the Taylor expansion must be subtracted from
the argument:
>>> p = taylor(exp, 2.0, 10)
>>> polyval(p[::-1], 2.5 - 2.0)
12.1824939606092
>>> exp(2.5)
12.1824939607035
"""
gen = enumerate(ctx.diffs(f, x, n, **options))
if options.get("chop", True):
return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
else:
return [d/ctx.factorial(i) for i, d in gen]
@defun
def pade(ctx, a, L, M):
r"""
Computes a Pade approximation of degree `(L, M)` to a function.
Given at least `L+M+1` Taylor coefficients `a` approximating
a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
polynomials `P, Q` satisfying
.. math ::
P = \sum_{k=0}^L p_k x^k
Q = \sum_{k=0}^M q_k x^k
q_0 = 1
A(x) Q(x) = P(x) + O(x^{L+M+1})
`P(x)/Q(x)` can provide a good approximation to an analytic function
beyond the radius of convergence of its Taylor series (example
from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
Ch.1A)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> one = mpf(1)
>>> def f(x):
... return sqrt((one + 2*x)/(one + x))
...
>>> a = taylor(f, 0, 6)
>>> p, q = pade(a, 3, 3)
>>> x = 10
>>> polyval(p[::-1], x)/polyval(q[::-1], x)
1.38169105566806
>>> f(x)
1.38169855941551
"""
# To determine L+1 coefficients of P and M coefficients of Q
# L+M+1 coefficients of A must be provided
assert len(a) >= L+M+1
if M == 0:
if L == 0:
return [ctx.one], [ctx.one]
else:
return a[:L+1], [ctx.one]
# Solve first
# a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
# ...
# a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
A = ctx.matrix(M)
for j in range(M):
for i in range(min(M, L+j+1)):
A[j, i] = a[L+j-i]
v = -ctx.matrix(a[(L+1):(L+M+1)])
x = ctx.lu_solve(A, v)
q = [ctx.one] + list(x)
# compute p
p = [0]*(L+1)
for i in range(L+1):
s = a[i]
for j in range(1, min(M,i) + 1):
s += q[j]*a[i-j]
p[i] = s
return p, q
AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/mpmath/calculus/differentiation.py | differentiation.py
import os,sys,string,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0,parentdir)
import unique
#Compare splicing annotations in LeafCutter and AltAnalyze's PSI EventAnnotation file
def verifyFile(filename):
status = False
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines(): status = True;break
except Exception: status = False
return status
def importDatabaseEventAnnotations(species,platform):
terminal_exons={}
header=True
count=0
fn = 'AltDatabase/'+species+'/'+platform+'/'+species+'_Ensembl_exons.txt'
fn = unique.filepath(fn)
for line in open(fn,'rU'):
line = line.rstrip('\n')
values = string.split(line,'\t')
if header:
eI = values.index('splice_events')
header=False
continue
exon = values[0]
event = values[eI]
if 'alt-N-term' in event or 'altPromoter' in event:
if 'cassette' not in event:
terminal_exons[exon] = 'altPromoter'
count+=1
elif 'alt-C-term' in event:
if 'cassette' not in event:
terminal_exons[exon] = 'alt-C-term'
count+=1
"""
elif 'bleedingExon' in event or 'altFinish' in event:
terminal_exons[exon] = 'bleedingExon'
count+=1"""
print count, 'terminal exon annotations stored'
return terminal_exons
def formatFeatures(features):
features2=[]
for f in features:
f = string.split(f,'|')
direction = f[-1]
annotation = f[0]
f = '('+direction+')'+annotation
features2.append(f)
return features2
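# Editor's hedged demo (not part of AltAnalyze): formatFeatures rewrites
# 'annotation|direction' strings as '(direction)annotation'. The feature
# string below is illustrative only.
def _format_features_demo():
    return formatFeatures(['E2.1|inclusion'])  # ['(inclusion)E2.1']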
class SplicingAnnotations(object):
def __init__(self, symbol, description,junc1,junc2,altExons,proteinPredictions,eventAnnotation,coordinates):
self.symbol = symbol
self.description = description
self.junc1 = junc1
self.junc2 = junc2
self.altExons = altExons
self.proteinPredictions = proteinPredictions
self.eventAnnotation = eventAnnotation
self.coordinates = coordinates
def Symbol(self): return self.symbol
def Description(self): return self.description
def Junc1(self): return self.junc1
def Junc2(self): return self.junc2
def AltExons(self): return self.altExons
def ProteinPredictions(self): return self.proteinPredictions
def EventAnnotation(self): return self.eventAnnotation
def Coordinates(self): return self.coordinates
def importPSIAnnotations(PSIpath):
header=True
count=0
events={}
for line in open(PSIpath,'rU').xreadlines():
line = line.rstrip('\n')
values = string.split(line,'\t')
if header:
sI = values.index('Symbol')
dI = values.index('Description')
eI = values.index('Examined-Junction')
bI = values.index('Background-Major-Junction')
aI = values.index('AltExons')
pI = values.index('ProteinPredictions')
vI = values.index('EventAnnotation')
cI = values.index('Coordinates')
rI = values.index('rawp')
header=False
else:
symbol = values[sI]
description = values[dI]
junc1 = values[eI]
junc2 = values[bI]
altExons = values[aI]
proteinPredictions = values[pI]
eventAnnotation = values[vI]
coordinates = values[cI]
key = symbol+':'+junc1+"|"+junc2
sa = SplicingAnnotations(symbol, description,junc1,junc2,altExons,proteinPredictions,eventAnnotation,coordinates)
coord1,coord2 = string.split(coordinates,'|')
events[coord1] = sa
events[coord2] = sa
return events
def importLeafCutterJunctions(leafcutter_clusters):
count=0
coordinate_clusters={}
for line in open(leafcutter_clusters,'rU').xreadlines():
line = line.rstrip('\n')
if ':' in line:
cluster = string.split(line,' ')[0]
cluster = string.split(cluster,':')
cluster_id = cluster[-1]
coordinates = cluster[0]+':'+cluster[1]+'-'+cluster[2]
try: coordinate_clusters[cluster_id].append(coordinates)
except Exception: coordinate_clusters[cluster_id] = [coordinates]
return coordinate_clusters
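# Editor's hedged sketch (not part of AltAnalyze): the parser above assumes
# LeafCutter cluster rows of the form 'chr:start:end:cluster_id <counts>';
# the sample row below is illustrative only.
def _leafcutter_parse_demo():
    cluster = 'chr1:827775:829002:clu_1'.split(':')
    cluster_id = cluster[-1]
    coordinates = cluster[0]+':'+cluster[1]+'-'+cluster[2]
    return cluster_id, coordinates  # ('clu_1', 'chr1:827775-829002')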
def importLeafSignificant(leafcutter_significant,coordinate_clusters):
header=True
count=0
significant_coordinates={}
unique_clusters=0
for line in open(leafcutter_significant,'rU').xreadlines():
values = line.rstrip('\n')
values = string.split(values,'\t')
if header:
pI = values.index('p')
header=False
else:
cluster = string.split(values[0],':')[1]
try:
p = float(values[pI])
except Exception:
p=1
coordinates = coordinate_clusters[cluster]
if p<0.05:
unique_clusters+=1
for coord in coordinates:
significant_coordinates[coord] = cluster
print unique_clusters,'significant clusters'
return significant_coordinates
def compare_algorithms(altanalyze_dir,leafcutter_significant,leafcutter_clusters,species,platform):
""" Add splicing annotations for PSI results """
### Get all splice-junction pairs
coordinate_clusters = importLeafCutterJunctions(leafcutter_clusters)
significant_coordinates = importLeafSignificant(leafcutter_significant,coordinate_clusters)
eventAnnotations = importPSIAnnotations(altanalyze_dir)
unique_clusters={}
for i in significant_coordinates:
if i not in eventAnnotations:
print i;sys.exit()
else:
print 'a',i;sys.exit()
### Get all de novo junction annotations (includes novel junctions)
psievents = importEventAnnotations(resultsDir,species,psievents,annotationType='de novo')
### Get all known junction annotations
psievents = importEventAnnotations(resultsDir,species,psievents)
### Import the annotations that provide alternative terminal events
terminal_exons = importDatabaseEventAnnotations(species,platform)
### Update our PSI annotation file with these improved predictions
updatePSIAnnotations(PSIpath, species, psievents, terminal_exons, junctionPairFeatures)
if __name__ == '__main__':
import multiprocessing as mlp
import getopt
#bam_dir = "H9.102.2.6.bam"
#outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed'
platform = 'RNASeq'
species = 'Hs'
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Insufficient command line flags supplied."
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['altanalyze=','leafcutter=','leafclusters=','species=','platform=','array='])
for opt, arg in options:
if opt == '--altanalyze': altanalyze_dir=arg
elif opt == '--leafcutter': leafcutter_significant=arg
elif opt == '--leafclusters': leafcutter_clusters=arg
elif opt == '--species': species=arg
elif opt == '--platform': platform=arg
elif opt == '--array': platform=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
compare_algorithms(altanalyze_dir,leafcutter_significant,leafcutter_clusters,species,platform)
AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/benchmarking/scripts/LeafCutterTranslation.py | LeafCutterTranslation.py
import os,sys,string,inspect
### Import python modules from an upstream directory
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0,parentdir)
import unique
import export
"""Goal: Import de novo Cufflinks transcripts and expression to infer dPSI for junctions"""
def verifyFile(filename):
status = False
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines(): status = True;break
except Exception: status = False
return status
def getJunctions(species,input_dir,fpkm_tracking_dir,gtf_dir):
ensembl_chr_db={}
gene_intron_db={}
### Get the chr for each Ensembl gene to carefully match to Cufflinks Symbol
exon_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
fn = unique.filepath(exon_dir)
header = True
for line in open(fn,'rU').xreadlines():
line = line.rstrip('\n')
t = string.split(line,'\t')
if header:
header = False
else:
ensembl = t[0]
exon = t[1]
chr = t[2]
strand = t[3]
start =int(t[4])
end = int(t[5])
ensembl_chr_db[ensembl]=chr,strand
if 'I' in exon:
intronID = ensembl+':'+exon
coords = [start,end]
coords.sort()
coords.append(intronID)
try: gene_intron_db[ensembl].append(coords)
except Exception: gene_intron_db[ensembl] = [coords]
symbol_ensembl_db={}
altanalyze_gene_annot_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations.txt'
fn = unique.filepath(altanalyze_gene_annot_dir)
for line in open(fn,'rU').xreadlines():
line = line.rstrip('\n')
ensemblID,symbol,description,null = string.split(line,'\t')
if 'Ensembl Gene ID' != ensemblID:
chr,strand = ensembl_chr_db[ensemblID]
symbol_ensembl_db[symbol]=ensemblID,chr,strand
### Import gene, isoform and expression information
isoform_fpkm={}
isoform_annotation={}
header = True
for line in open(fpkm_tracking_dir,'rU').xreadlines():
line = line.rstrip('\n')
t = string.split(line,'\t')
if header:
gi = t.index('gene_id')
si = t.index('gene_short_name')
ti = t.index('tracking_id')
fi = t.index('FPKM')
header = False
else:
geneID = t[gi]
symbol = t[si]
transcriptID = t[ti]
fpkm = float(t[fi])
isoform_fpkm[transcriptID]=fpkm
isoform_annotation[transcriptID]=symbol
transcript_junctions={}
transcript_exons = {}
for line in open(gtf_dir,'rU').xreadlines():
line = line.rstrip('\n')
t = string.split(line,'\t')
chr = t[0]
type = t[2]
start = int(t[3])
stop = int(t[4])
strand = t[6]
annotations = t[8]
annotations = string.split(annotations,'"')
geneID = annotations[1]
transcriptID = annotations[3]
if strand == '-':
start, stop = stop, start
if type == 'exon':
try: transcript_exons[transcriptID,strand].append([chr,start,stop])
except Exception: transcript_exons[transcriptID,strand] = [[chr,start,stop]]
parent = findParentDir(fpkm_tracking_dir)
output_file = input_dir+'/'+parent+'__junction.bed'
io = export.ExportFile(output_file)
io.write('track name=junctions description="TopHat junctions"\n')
unmatched_gene={}
intron_retention=0
for (transcriptID,strand) in transcript_exons:
exons = transcript_exons[transcriptID,strand]
numExons = len(exons)
fpkm = isoform_fpkm[transcriptID]
symbol = isoform_annotation[transcriptID]
chr,start,stop = exons[0]
if len(symbol)>0 and symbol in symbol_ensembl_db:
ensemblID, EnsChr,EnsStrand = symbol_ensembl_db[symbol]
strand = EnsStrand ### Likely more accurate
if strand == '-':
exons.reverse()
if chr == EnsChr: ### Double check this is the correct gene symbol for that gene ID
i=0
while i<(numExons-1):
chr,start,stop = exons[i]
chr,next_start,next_stop = exons[i+1]
junction = stop,next_start
"""
if stop == 63886987 or start == 63886987 or next_start == 63886987 or next_stop == 63886987:
print junction, start, stop, next_start, next_stop"""
if len(symbol)>0 and symbol in symbol_ensembl_db:
if chr == EnsChr: ### Double check this is the correct gene symbol for that gene ID
try: transcript_junctions[ensemblID,chr,junction,strand]+=fpkm
except Exception: transcript_junctions[ensemblID,chr,junction,strand]=fpkm
i+=1
### Identify retained introns in transcripts
fpkm = int(fpkm*10)
increment=-1
for exon in exons:
chr,start,end = exon
if ensemblID in gene_intron_db and fpkm>0:
for (intron5,intron3,intronID) in gene_intron_db[ensemblID]:
start_end = [start,end]; start_end.sort(); start,end = start_end
coords = [start,end]+[intron5,intron3]
coords.sort()
if coords[0] == start and coords[-1] == end:
"""
if ensemblID == 'ENSG00000167522':
if strand == '+': print [start,end], [intron5,intron3];sys.exit()
"""
start = intron5
stop = intron3
if strand=='-': increment = -1
else: increment = -1
outlier_start = start-10+increment; outlier_end = start+10+increment
junction_id = intronID+'-'+str(start)
exon_lengths = '10,10'; dist = '0,0'
entries = [chr,str(outlier_start),str(outlier_end),junction_id,str(fpkm),strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
io.write(string.join(entries,'\t')+'\n')
### 3' junction
if strand=='-': increment = 0
else: increment = 0
outlier_start = stop-10+increment; outlier_end = stop+10+increment
junction_id = intronID+'-'+str(stop)
exon_lengths = '10,10'; dist = '0,0'
entries = [chr,str(outlier_start),str(outlier_end),junction_id,str(fpkm),strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
io.write(string.join(entries,'\t')+'\n')
intron_retention +=1
else:
unmatched_gene[symbol]=None
else:
unmatched_gene[symbol]=None
print len(unmatched_gene), 'Unmatched gene symbols'
num_junctions=0
for (ensemblID,chr,junction,strand) in transcript_junctions:
fpkm = int(transcript_junctions[ensemblID,chr,junction,strand]*10)
if fpkm>0:
fpkm = str(fpkm)
junction = list(junction)
junction.sort()
junction_id = ensemblID+':'+str(junction[0])+'-'+str(junction[1])
start = int(junction[0])
end = int(junction[1])
if strand == '-':
alt_start = start
alt_end = end-1
else:
alt_start = start
alt_end = end-1
alt_start = str(alt_start)
alt_end = str(alt_end)
#exon_lengths = '10,10'; dist = '0,0'
exon_lengths = '0,0'; dist = '0,0'
entries = [chr,alt_start,alt_end,junction_id,fpkm,strand,alt_start,alt_end,'255,0,0\t2',exon_lengths,'0,'+dist]
io.write(string.join(entries,'\t')+'\n')
num_junctions+=1
io.close()
print intron_retention, 'transcripts with intron retention'
print num_junctions, 'junctions exported to:',output_file
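# Editor's hedged illustration (not part of AltAnalyze): shape of one exported
# junction row (TopHat-style BED, as assembled above). The score column is 10x
# the summed isoform FPKM, and '255,0,0\t2' packs the itemRgb and blockCount
# columns; the identifiers below are illustrative only.
def _junction_bed_row_demo():
    entries = ['chr1', '1000', '2000', 'ENSG00000000001:1000-2000', '15',
               '+', '1000', '2000', '255,0,0\t2', '0,0', '0,0,0']
    return '\t'.join(entries)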
def findParentDir(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
return string.split(filename,'/')[-2]
if __name__ == '__main__':
import multiprocessing as mlp
import getopt
species = 'Hs'
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Insufficient command line flags supplied."
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i='])
for opt, arg in options:
if opt == '--i': input_dir=arg
elif opt == '--species': species=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
dir_list = unique.read_directory(input_dir)
for file in dir_list:
if 'isoforms.fpkm_tracking' in file:
fpkm_tracking_dir = input_dir+'/'+file
elif 'transcripts.gtf' in file:
gtf_dir = input_dir+'/'+file
getJunctions(species,input_dir,fpkm_tracking_dir,gtf_dir)
AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/benchmarking/scripts/IsoformToJunctionCounts.py | IsoformToJunctionCounts.py
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import copy
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".dat": dir_list2.append(entry)
return dir_list2
def importEnsemblUniprot(filename):
fn=filepath(filename); x = 0
for line in open(fn,'r').xreadlines():
data, newline= string.split(line,'\n')
t = string.split(data,'\t')
if x==0: x=1
else:
try:
ensembl=t[0];uniprot=t[1]
try: uniprot_ensembl_db[uniprot].append(ensembl)
except KeyError: uniprot_ensembl_db[uniprot] = [ensembl]
except Exception: null=[] ### Occurs when no file is located on the AltAnalyze server
print len(uniprot_ensembl_db),"UniProt entries with Ensembl annotations"
def exportEnsemblUniprot(filename):
import export
export_data = export.ExportFile(filename)
export_data.write(string.join(['ensembl','uniprot'],'\t')+'\n')
for uniprot in uniprot_ensembl_db:
for ensembl in uniprot_ensembl_db[uniprot]:
export_data.write(string.join([ensembl,uniprot],'\t')+'\n')
export_data.close()
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
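# Editor's hedged demo (not part of AltAnalyze): cleanUpLine strips newline,
# carriage-return and double-quote characters ('\c' is a literal
# backslash-c pair, so that replace is effectively a no-op on normal text).
def _clean_up_line_demo():
    return cleanUpLine('foo"bar"\r\n')  # 'foobar'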
def findSpeciesInUniProtFiles(force):
### Download all UniProt annotation files and grab all species names, TaxIDs and corresponding URLs
import AltAnalyze
###Get species annotations from the GO-Elite config
species_annot_db=AltAnalyze.importGOEliteSpeciesInfo(); tax_db={}
for species_full in species_annot_db:
taxid=species_annot_db[species_full].TaxID()
tax_db[taxid]=species_full
if force == 'yes':
### Should only need to be run if UniProt changes its species-to-file associations or Ensembl adds new species
import export; import update
filesearch = '_sprot_'
all_swissprot = update.getFTPData('ftp.expasy.org','/databases/uniprot/current_release/knowledgebase/taxonomic_divisions',filesearch)
for file in all_swissprot:
gz_filepath, status = update.download(file,'uniprot_temp/','')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
species_uniprot_db={}; altanalyze_species_uniprot_db={}
dir=read_directory('/uniprot_temp')
for filename in dir:
fn=filepath('uniprot_temp/'+filename)
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
if data[0:2] == 'OX':
taxid = string.split(data,'=')[1][:-1]
if taxid in tax_db:
species_full = tax_db[taxid]
elif data[0:2] == 'OS':
species = data[5:]
species = string.split(species,' ')[:2]
species_full = string.join(species,' ')
elif data[0] == '/':
url = 'ftp.expasy.org/databases/uniprot/current_release/knowledgebase/taxonomic_divisions/'+filename
ss = string.split(species_full,' ')
if len(ss)==2: ### Species name is in the format Homo sapiens - and '(' not in species_full and ')' not in species_full and '/' not in species_full
try: species_uniprot_db[species_full].append((taxid,'ftp://'+url+'.gz'))
except KeyError: species_uniprot_db[species_full] = [(taxid,'ftp://'+url+'.gz')]
taxid = ''; species_full = ''
from build_scripts import EnsemblImport
species_uniprot_db = EnsemblImport.eliminate_redundant_dict_values(species_uniprot_db)
### Export all species to UniProt file relationships so this function needs to only be run once
import export
up = export.ExportFile('Config/uniprot-species-file.txt')
for species_full in species_uniprot_db:
values = species_uniprot_db[species_full]
if len(values)>1:
found = 'no'
for (taxid,url) in values:
if taxid in tax_db:
if species_full == tax_db[taxid]: found='yes'; print 'ambiguity resolved:',species_full; break
if found == 'yes': break
else: (taxid,url) = values[0]
up.write(string.join([species_full,taxid,url],'\t')+'\n')
up.close()
def getUniProtURLsForAllSupportedSpecies():
### Import all UniProt-supported species and URLs
species_uniprot_db={}
fn=filepath('Config/uniprot-species-file.txt')
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
species_full,taxid,url = string.split(data,'\t')
if 'Homo sapiens' not in species_full: ### There's a separate file for us humans (so egotistical!!!)
species_uniprot_db[species_full] = taxid,url
import AltAnalyze
###Get species annotations from the GO-Elite config
species_annot_db=AltAnalyze.importGOEliteSpeciesInfo()
### Export all urls for currently supported species
import UI
file_location_defaults = UI.importDefaultFileLocations()
location_db={}; species_added=[]
for species_full in species_annot_db:
if species_full in species_uniprot_db:
taxid,url = species_uniprot_db[species_full]
species_code = species_annot_db[species_full].SpeciesCode()
try: location_db[url].append(species_code)
except Exception: location_db[url] = [species_code]
species_added.append(species_full)
for species_full in species_annot_db:
taxid = species_annot_db[species_full].TaxID()
species_code = species_annot_db[species_full].SpeciesCode()
if species_full not in species_added:
for species_name in species_uniprot_db:
tax,url = species_uniprot_db[species_name]
if tax == taxid:
location_db[url].append(species_code)
print species_code
for url in location_db:
species = string.join(location_db[url],'|')
fl = UI.FileLocationData('ftp', url, species)
try: file_location_defaults['UniProt'].append(fl)
except KeyError: file_location_defaults['UniProt'] = [fl]
UI.exportDefaultFileLocations(file_location_defaults)
def import_uniprot_db(filename):
fn=filepath(filename); global species_not_imported; species_not_imported=[]
ac = '';sm='';id = '';sq = '';osd = ''; gn = '';dr = '';de = '';ft_string = ''; kw = ''; ft = []; ensembl = []; mgi = []; unigene = []; embl = []
ft_call=''; rc=''; go=''; x = 0; y = 0; count = 0
for line in open(fn,'r').xreadlines():
data, newline= string.split(line,'\n');
#if x<3: print data
#else: kill
if 'SRSF1_HUMAN' in data:
count = 0
if count == 1: print data
if data[0:2] == 'ID': id += data[5:]
elif "GO; GO:" in data: go += data[5:]
elif data[0:2] == 'DE': de += data[5:]
elif data[0:2] == 'KW': kw += data[5:] ### Keywords
elif data[0:2] == 'AC': ac += data[5:]
elif data[0:2] == 'OS': osd += data[5:]
elif data[0:2] == 'RC': rc = rc + data[5:]
elif data[0:2] == ' ': sq += data[5:]
elif 'DR Ensembl;' in data:
null,dr= string.split(data,'Ensembl; '); dr = string.split(dr,'; '); ensembl+=dr[:-1]
elif 'DR MGI;' in data: null,dr,null= string.split(data,'; '); mgi.append(dr)
elif 'DR UniGene;' in data: null,dr,null= string.split(data,'; '); unigene.append(dr)
elif 'DR EMBL;' in data: null,dr,null,null,null= string.split(data,'; '); embl.append(dr)
elif 'GN Name=' in data:
null,gn = string.split(data,'GN Name='); gn = gn[0:-1]
elif data[0:2] == 'FT':
try:
if len(ft_string) > 0 and data[5] == ' ': ft_string = ft_string + data[33:]
elif len(ft_string) > 0 and data[5] != ' ': #if previous loop added data but the next ft line is a new piece of functional data
ft.append(ft_string) #append the previous value
ft_string = data[5:]
else: ft_string = ft_string + data[5:]
except IndexError:
print ft_string;kill
elif data[0:2] == 'CC': ###grab function description information
if '-!-' in data: x=0;y=0
if x == 1: ft_call = ft_call + data[8:]
if y == 1: sm = sm + data[8:]
###if the CC entry is function, begin adding data
if '-!- FUNCTION:' in data: ft_call = ft_call + data[19:]; x = 1
if '-!- SIMILARITY' in data: sm = sm + data[21:]; y = 1
if data[0] == '/':
###Alternatively: if species_name in osd or 'trembl' in filename:
if count == 1:
count = 2
if species_name == 'Mus musculus': alt_osd = 'mouse'
else: alt_osd = 'alt_osd'
try:
if species_name in osd or alt_osd in osd: null=[]
except TypeError: print species_name,osd,alt_osd;kill
if species_name in osd or alt_osd in osd:
class_def,cellular_components = analyzeCommonProteinClassesAndCompartments(sm,kw,ft_call,ft_string,rc,de,go)
ft_list2 = []; ac = string.split(ac,'; '); ac2=[]
for i in ac: i = string.replace(i,';',''); ac2.append(i)
ac = ac2
try: id = string.split(id,' ');id = id[0]
except ValueError: id = id
sq_str = ''; sq = string.split(sq,' ')
for entry in sq: sq_str = sq_str + entry; sq = sq_str
ft.append(ft_string) #always need to add the current ft_string
if len(ft_string) > 0: # or len(ft_string) == 0:
for entry in ft:
entry = string.split(entry,' ')
ft_list = []
for item in entry:
if len(item)>0:
try: item = int(item)
except ValueError:
if item[0] == ' ': item = item[1:]
if item[-1] == ' ': item = item[0:-1]
else: item = item
ft_list.append(item)
ft_list2.append(ft_list)
if 'trembl' in filename: file_type = 'fragment'
else: file_type = 'swissprot'
alternate_ensembls=[]
for secondary_ac in ac:
if secondary_ac in uniprot_ensembl_db:
for alt_ens in uniprot_ensembl_db[secondary_ac]: alternate_ensembls.append(alt_ens)
secondary_to_primary_db[secondary_ac] = id
### Update this database with new annotations from the file
for ens in ensembl:
for secondary_ac in ac:
try:
if ens not in uniprot_ensembl_db[secondary_ac]:
uniprot_ensembl_db[secondary_ac].append(ens)
except KeyError: uniprot_ensembl_db[secondary_ac]=[ens]
ensembl += alternate_ensembls
y = UniProtAnnotations(id,ac,sq,ft_list2,ensembl,gn,file_type,de,embl,unigene,mgi,ft_call,class_def,cellular_components)
uniprot_db[id] = y
else: species_not_imported.append(osd)
ac = '';id = '';sq = '';osd = '';gn = '';dr = '';de = ''; ft_call=''; rc='';sm='';go=''; kw=''
ft_string = '';ft = []; ensembl = []; mgi = []; unigene = []; embl = []
x+=1
print "Number of imported swissprot entries:", len(uniprot_db)
def analyzeCommonProteinClassesAndCompartments(sm,kw,ft_call,ft_string,rc,de,go):
### Used to assign "Common Protein Classes" annotations to Gene Expression summary file (ExpressionOutput folder)
class_def=[]; annotation=[]; cellular_components = []
if 'DNA-binding domain' in sm or 'Transcription' in go or 'Transcription regulation' in kw: class_def.append('transcription regulator')
if 'protein kinase superfamily' in sm or 'Kinase' in go: class_def.append('kinase')
if 'mRNA splicing' in kw or 'mRNA processing' in kw: class_def.append('splicing regulator')
if 'G-protein coupled receptor' in sm or 'LU7TM' in sm:
g_type = []
if ('adenylate cyclase' in ft_call) or ('adenylyl cyclase'in ft_call):
###if both occur
if (('stimulat' in ft_call) or ('activat' in ft_call)) and ('inhibit' in ft_call):
if 'inhibit aden' in ft_call: g_type.append('Gi')
if 'stimulate aden' in ft_call or 'activate aden' in ft_call: g_type.append('Gs')
elif ('stimulat' in ft_call) or ('activat' in ft_call): g_type.append('Gs')
elif ('inhibit' in ft_call): g_type.append('Gi')
if ('cAMP' in ft_call):
if ('stimulat' in ft_call) or ('activat' in ft_call): g_type.append('Gs')
if ('inhibit' in ft_call): g_type.append('Gi')
if ('G(s)' in ft_call): g_type.append('Gs')
if ('G(i)' in ft_call): g_type.append('Gi')
if ('pertussis' in ft_call and 'insensitive' not in ft_call): g_type.append('Gi')
if ('G(i/0)' in ft_call) or ('G(i/o)' in ft_call): g_type.append('Gi')
if ('G(o)' in ft_call): g_type.append('Go')
if ('G(alpha)q' in ft_call): g_type.append('Gq')
if ('G(11)' in ft_call): g_type.append('G11')
if ('G(12)' in ft_call): g_type.append('G12')
if ('G(13)' in ft_call): g_type.append('G13')
if ('mobiliz' in ft_call and 'calcium' in ft_call and 'without formation' not in ft_call): g_type.append('Gq')
if ('phosphatidyl' in ft_call and 'inositol' in ft_call) or ('G(q)' in ft_call) or ('phospholipase C' in ft_call):
g_type.append('Gq')
if ('inositol phos' in ft_call) or ('phosphoinositide' in ft_call) or ('PKC'in ft_call) or ('PLC' in ft_call):
g_type.append('Gq')
if ('intracellular' in ft_call and 'calcium' in ft_call) and 'nor induced' not in ft_call: g_type.append('Gq')
if 'G-alpha-11' in ft_call: g_type.append('G11')
if 'Orphan' in ft_call or 'orphan' in ft_call: g_type.append('orphan')
if 'taste' in ft_call or 'Taste' in ft_call: g_type.append('taste')
if 'vision' in ft_call or 'Vision' in ft_call: g_type.append('vision')
if 'odorant' in ft_call or 'Odorant' in ft_call: g_type.append('odorant')
if 'influx of extracellar calcium' in ft_call: g_type.append('Gq')
if 'pheromone receptor' in ft_call or 'Pheromone receptor' in ft_call: g_type.append('pheromone')
g_protein_list = unique.unique(g_type); g_protein_str = string.join(g_protein_list,'|')
class_def.append('GPCR(%s)' % g_protein_str)
elif 'receptor' in sm or 'Receptor' in go: class_def.append('receptor')
if len(ft_string)>0: ### Add cellular component annotations
if 'ecreted' in sm: k = 1; annotation.append('extracellular')
if 'Extraceullar space' in sm: k = 1; annotation.append('extracellular')
if 'ecreted' in go: k = 1; annotation.append('extracellular')
if 'xtracellular' in go: k = 1; annotation.append('extracellular')
if 'Membrane' in sm: k = 1; annotation.append('transmembrane')
if 'TRANSMEM' in ft_string: k = 1; annotation.append('transmembrane')
if 'integral to membrane' in go: k = 1; annotation.append('transmembrane')
if 'Nucleus' in sm: k = 1; annotation.append('nucleus')
if 'nucleus' in go: k = 1; annotation.append('nucleus')
if 'Cytoplasm' in sm: k = 1; annotation.append('cytoplasm')
if 'Mitochondrion' in sm: k = 1; annotation.append('mitochondrion')
if 'SIGNAL' in ft_string: k = 1; annotation.append('signal')
###Generate probably secreted annotations
if 'signal' in annotation and 'transmembrane' not in annotation:
for entry in annotation:
if entry != 'signal': cellular_components.append(entry)
cellular_components.append('extracellular');annotation = cellular_components
elif 'signal' in annotation and 'transmembrane' in annotation:
for entry in annotation:
if entry != 'signal': cellular_components.append(entry)
annotation = cellular_components
cellular_components = string.join(annotation,'|')
class_def = string.join(class_def,'|')
return class_def, cellular_components
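# Editor's hedged sketch (not part of AltAnalyze): the classification above is
# keyword-driven; a SwissProt similarity comment naming the protein kinase
# superfamily (all other fields empty) maps to the 'kinase' class.
def _protein_class_demo():
    class_def, compartments = analyzeCommonProteinClassesAndCompartments(
        'Belongs to the protein kinase superfamily.', '', '', '', '', '', '')
    return class_def  # 'kinase'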
class UniProtAnnotations:
def __init__(self,primary_id,secondary_ids,sequence,ft_list,ensembl,name,file_type,description,embl,unigene,mgi,ft_call,class_def,cellular_components):
self._primary_id = primary_id; self._sequence = sequence; self._name = name; self._secondary_ids = secondary_ids; self._class_def = class_def
self._file_type = file_type; self._description = description; self._ensembl = ensembl; self._ft_list = ft_list
self._embl = embl; self._unigene = unigene; self._mgi = mgi; self._ft_call = ft_call; self._cellular_components = cellular_components
def PrimaryID(self): return self._primary_id
def SecondaryIDs(self): return self._secondary_ids
def Sequence(self): return self._sequence
def Name(self): return self._name
def FTList(self):
new_FTList = [] ### Transform this set of feature information into objects
exclusion_list = ['CHAIN','VARIANT','CONFLICT','VAR_SEQ']
for ft_entry in self._ft_list:
try:
if len(ft_entry)>3: feature, start, stop, description = ft_entry
else: feature, start, stop = ft_entry; description = ''
if feature not in exclusion_list: ### Not informative annotations for AltAnalyze
dd = DomainData(feature,start,stop,description)
new_FTList.append(dd)
except ValueError:
new_FTList = new_FTList ### Occurs when no FT info present
return new_FTList
def FileType(self): return self._file_type
def Description(self): return self._description
def FunctionDescription(self):
if 'yrighted' in self._ft_call: return ''
else: return self._ft_call
def CellularComponent(self): return self._cellular_components
def ClassDefinition(self): return self._class_def
def ReSetPrimarySecondaryType(self,primary_id,secondary_id,file_type):
secondary_ids = [secondary_id]
self._primary_id = primary_id
self._secondary_ids = secondary_ids
self._file_type = file_type
def Ensembl(self):
ens_list = unique.unique(self._ensembl)
ens_str = string.join(ens_list,',')
return ens_str
def EnsemblList(self): return self._ensembl
def EMBL(self):
embl_str = string.join(self._embl,',')
return embl_str
def Unigene(self):
unigene_str = string.join(self._unigene,',')
return unigene_str
def MGI(self):
mgi_str = string.join(self._mgi,',')
return mgi_str
def DataValues(self):
output = self.Name()+'|'+self.PrimaryID()
return output
def __repr__(self): return self.DataValues()
class DomainData:
def __init__(self,feature,start,stop,description):
self._feature = feature; self._start = start; self._stop = stop; self._description = description
def Feature(self): return self._feature
def Start(self): return self._start
def Stop(self): return self._stop
def Description(self): return self._description
def DataValues(self):
output = self.Feature()+'|'+self.Description()
return output
def __repr__(self): return self.DataValues()
def export():
fasta_data = uniprot_fildir + 'uniprot_sequence.txt'
fasta_data2 = uniprot_fildir + 'uniprot_feature_file.txt'
fn=filepath(fasta_data); fn2=filepath(fasta_data2)
data = open(fn,'w'); data2 = open(fn2,'w')
custom_annotations = {}
for id in uniprot_db:
y = uniprot_db[id]; ac = ''; ac_list = y.SecondaryIDs(); sq = y.Sequence()
ft_list = y.FTList(); ft_call = y.FunctionDescription(); ens_list = y.EnsemblList()
ensembl = y.Ensembl(); mgi = y.MGI();embl = y.EMBL(); unigene = y.Unigene()
gn = y.Name();de = y.Description()
file_type = y.FileType(); ac = string.join(ac_list,',')
if '-' in id: ac2 = id; id = ac; ac = ac2
info = [id,ac,sq,gn,ensembl,de,file_type,unigene,mgi,embl]
info = string.join(info,'\t')+'\n'
data.write(info)
for ens_gene in ens_list:
if 'T0' not in ens_gene and 'P0' not in ens_gene: ### Exclude protein and transcript IDs
custom_annot=string.join([ens_gene,y.CellularComponent(), y.ClassDefinition(),gn,de,id,ac,unigene],'\t')+'\n'
if len(y.CellularComponent())>1 or len(y.ClassDefinition())>1: custom_annotations[ens_gene] = custom_annot
if len(ft_list)>0:
for dd in ft_list: ### Export domain annotations
try:
                    n = int(dd.Start()); n = int(dd.Stop()) ### Validation only: raises ValueError when start/stop is '?' (uninformative entries)
info2 = string.join([id,ac,dd.Feature(),str(dd.Start()),str(dd.Stop()),dd.Description()],'\t') +'\n'
info2 = string.replace(info2,';,','')
data2.write(info2)
except ValueError: null=[]
data.close();data2.close()
###Export custom annotations for Ensembl genes
output_file = uniprot_fildir + 'custom_annotations.txt'
fn=filepath(output_file); data = open(fn,'w')
for entry in custom_annotations: data.write(custom_annotations[entry])
data.close()
def runExtractUniProt(species,species_full,uniprot_filename_url,trembl_filename_url,force):
global uniprot_ensembl_db;uniprot_ensembl_db={}
global uniprot_db;uniprot_db={}; global species_name; global uniprot_fildir
global secondary_to_primary_db; secondary_to_primary_db={}
import update; reload(update)
species_name = species_full
import UI; species_names = UI.getSpeciesInfo()
species_full = species_names[species]
species_full = string.replace(species_full,' ','_')
uniprot_file = string.split(uniprot_filename_url,'/')[-1]; uniprot_file = string.replace(uniprot_file,'.gz','')
trembl_file = string.split(trembl_filename_url,'/')[-1]; trembl_file = string.replace(trembl_file,'.gz','')
uniprot_fildir = 'AltDatabase/uniprot/'+species+'/'
uniprot_download_fildir = 'AltDatabase/uniprot/'
uniprot_ens_file = species+'_Ensembl-UniProt.txt'; uniprot_ens_location = uniprot_fildir+uniprot_ens_file
uniprot_location = uniprot_download_fildir+uniprot_file
trembl_location = uniprot_download_fildir+trembl_file
add_trembl_annotations = 'no' ### Currently we don't need these annotations
try: importEnsemblUniprot(uniprot_ens_location)
except IOError:
try:
### Download the data from the AltAnalyze website (if there)
update.downloadCurrentVersion(uniprot_ens_location,species,'txt')
importEnsemblUniprot(uniprot_ens_location)
except Exception: null=[]
try:
uniprot_ens_location_built = string.replace(uniprot_ens_location,'UniProt','Uniprot-SWISSPROT')
uniprot_ens_location_built = string.replace(uniprot_ens_location_built,'uniprot','Uniprot-SWISSPROT')
importEnsemblUniprot(uniprot_ens_location_built)
except Exception: null=[]
### Import UniProt annotations
counts = update.verifyFile(uniprot_location,'counts')
if force == 'no' or counts > 8: import_uniprot_db(uniprot_location)
else:
### Directly download the data from UniProt
gz_filepath, status = update.download(uniprot_filename_url,uniprot_download_fildir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
import_uniprot_db(uniprot_location)
if add_trembl_annotations == 'yes':
### Import TreMBL annotations
try:
if force == 'yes': uniprot_location += '!!!!!' ### Force an IOError
import_uniprot_db(trembl_location)
except IOError:
### Directly download the data from UniProt
update.download(trembl_filename_url,uniprot_download_fildir,'')
import_uniprot_db(trembl_location)
export()
exportEnsemblUniprot(uniprot_ens_location)
if __name__ == '__main__':
#findSpeciesInUniProtFiles('no'); sys.exit()
getUniProtURLsForAllSupportedSpecies()
sys.exit()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/ExtractUniProtFunctAnnot.py
|
ExtractUniProtFunctAnnot.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import math
import reorder_arrays
from build_scripts import ExonArray
from build_scripts import EnsemblImport
from build_scripts import ExonArrayEnsemblRules
try: from build_scripts import JunctionArrayEnsemblRules
except Exception: ### Path error issue which remains partially unresolved
import JunctionArrayEnsemblRules
import export
import RNASeq
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def verifyFile(filename,server_folder):
fn=filepath(filename)
try:
for line in open(fn,'rU').xreadlines():break
except Exception:
import update; reload(update)
if server_folder == None: server_folder = 'AltMouse'
continue_analysis = update.downloadCurrentVersion(filename,server_folder,'')
if continue_analysis == 'no':
print 'The file:\n',filename, '\nis missing and cannot be found online. Please save to the designated directory or contact AltAnalyze support.';sys.exit()
########### Recent code for dealing with comprehensive Affymetrix Junction Arrays
########### Begin Analyses ###########
class ExonAnnotationData:
def Probeset(self): return self._probeset
def ProbesetName(self): return self._psr
def ExonClusterID(self): return self._exon_cluster_id
def setGeneID(self, geneID): self.geneid = geneID
def GeneID(self): return self.geneid
def setTransSplicing(self): self.trans_splicing = 'yes'
def setSecondaryGeneID(self,secondary_geneid): self.secondary_geneid = secondary_geneid
def SecondaryGeneID(self): return self.secondary_geneid
def checkExonPosition(self,exon_pos): return 'left'
def TransSplicing(self): return self.trans_splicing
def EnsemblGeneID(self):
geneid = self._geneid
if 'ENS' in self._geneid:
if ',' in self._geneid:
ens=[]
ids = string.split(self._geneid,',')
for id in ids:
if 'ENS' in id: ens.append(id)
geneid = unique.unique(ens)[-1]
else: geneid=''
return geneid
def EnsemblGeneIDs(self):
geneid = self._geneid
if 'ENS' in self._geneid:
if ',' in self._geneid:
ens=[]
ids = string.split(self._geneid,',')
for id in ids:
if 'ENS' in id: ens.append(id)
geneids = unique.unique(ens)
else: geneids = [self._geneid]
else: geneids=[]
return geneids
def Symbol(self):
try: symbols = string.split(self._symbols,',')
except Exception: symbols = self._symbols
return symbols
def setTranscriptClusterID(self,transcript_cluster): self._transcript_cluster = transcript_cluster
def TranscriptCluster(self):
if self._transcript_cluster[-2:] == '.1':
self._transcript_cluster = self._transcript_cluster[:-2]
return self._transcript_cluster
def setTranscripts(self, transcripts): self.transcripts = transcripts
def EnsemblTranscripts(self): return self.transcripts
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def setStart(self, start): self.start = start
def setEnd(self, end): self.end = end
def Start(self): return self.start
def End(self): return self.end
def setChromosome(self,chr):
self._chromosome_info = chr
def Chromosome(self):
if len(self._chromosome_info)>0:
try:
null,chr = string.split(self._chromosome_info,'=chr')
chromosome,null=string.split(chr,':')
except Exception: chromosome = self._chromosome_info
if chromosome == 'chrM': chromosome = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chromosome == 'M': chromosome = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
else: chromosome = 'not-assinged'
return chromosome
def Strand(self):
if self._strand == '-': self._strand = '-1'
else: self._strand = '1'
return self._strand
def ProbesetClass(self):
        ###e.g. core, extended, full
        #return self._probeset_class
return 'core'
def ExternalExonClusterIDs(self): return self._exon_clusters
def ExternalExonClusterIDList(self):
external_exonid_list = string.split(self.ExternalExonClusterIDs(),'|')
return external_exonid_list
def Constitutive(self): return self._constitutive_status
def Sequence(self): return string.lower(self._seq)
def JunctionSequence(self): return string.replace(self.Sequence(),'|','')
def JunctionSequences(self):
try: seq1, seq2 = string.split(self.Sequence(),'|')
except Exception:
seq1 = self.Sequence()[:len(self.Sequence())/2]
seq2 = self.Sequence()[-1*len(self.Sequence())/2:]
return seq1, seq2
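    ### e.g. a stored sequence 'acgt|ttaa' yields ('acgt','ttaa'); when no '|'
    ### delimiter is present the sequence is simply split in half (values illustrative)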
def Report(self):
output = self.Probeset()
return output
def __repr__(self): return self.Report()
class PSRAnnotation(ExonAnnotationData):
def __init__(self,psr,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_clusters,constitutive,seq,probeset_type):
self._transcript_cluster = transcript_cluster; self._geneid = geneids; self._exon_clusters = exon_clusters;
self._constitutive_status = constitutive; self._symbols = symbols
self._strand = strand; self._chromosome_info = ucsclink; self._probeset = probeset; self._psr = psr; self._seq = seq
self._probeset_type = probeset_type
class EnsemblInformation:
def __init__(self, chr, strand, gene, symbol, description):
self._chr = chr; self._strand = strand; self._gene = gene; self._description = description
self._symbol = symbol
def GeneID(self): return self._gene
def Chromosome(self):
if self._chr == 'chrM': self._chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if self._chr == 'M': self._chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
return self._chr
def Strand(self): return self._strand
def Description(self): return self._description
def Symbol(self): return self._symbol
def __repr__(self): return self.GeneID()
def importEnsemblLiftOverData(filename):
fn=filepath(filename); ens_translation_db={}
print 'importing:',filename
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
tc, new_ens, new_coord = string.split(data,'\t')
ens_translation_db[tc]=new_ens
print len(ens_translation_db), 'Old versus new Ensembl IDs imported (from coordinate liftover and realignment)'
return ens_translation_db
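### The liftover file parsed above is assumed (from the parsing logic) to be
### tab-delimited with three columns - transcript cluster ID, updated Ensembl
### gene ID and new coordinates - e.g. (values illustrative only):
### TC01000626.hg.1 <tab> ENSG00000123456 <tab> chr1:1234567-1256789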
def importJunctionArrayAnnotations(species,array_type,specific_array_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_LiftOverEnsembl.txt'
try: verifyFile(filename,array_type+'/'+specific_array_type) ### Downloads server file if not local
except Exception: null=[]
try: ens_translation_db = importEnsemblLiftOverData(filename)
except Exception: ens_translation_db={}; print "No coordinate LiftOver file present (not supplied for HJAY or MJAY)!!!!"
from build_scripts import EnsemblImport
ens_gene_chr_db = EnsemblImport.importEnsGeneData(species) ### retrieves chromosome and strand info for each gene
ensembl_annotations = 'AltDatabase/ensembl/'+ species + '/'+species+ '_Ensembl-annotations_simple.txt'
ensembl_annotation_db = importGeneric(ensembl_annotations)
extraction_type = 'Ensembl'
tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
ens_trans_gene_db = importGenericReverse('AltDatabase/ensembl/Hs/Hs_Ensembl_transcript-annotations.txt')
ensembl_symbol_db={}; ensembl_gene_db={}
for ens_geneid in ensembl_annotation_db:
description, symbol = ensembl_annotation_db[ens_geneid]
if ens_geneid in ens_gene_chr_db:
chr,strand = ens_gene_chr_db[ens_geneid]
ei = EnsemblInformation(chr,strand,ens_geneid,symbol,description)
if len(symbol)>0:
try: ensembl_symbol_db[symbol].append(ei)
except KeyError: ensembl_symbol_db[symbol] =[ei]
ensembl_gene_db[ens_geneid] = ei
primary_gene_annotation_export = 'AltDatabase/'+species +'/'+ array_type +'/'+ array_type+ '_gene_annotations.txt'
ens_match=0; sym_match=0; ensembl_associations={}; gene_annotation_db={}; missing_count=0
### We want to maximize accurate gene-transcript associations (given the poor state of annotations provided by Affymetrix in these files)
for transcript_cluster_id in tc_ensembl_annotations:
ti = tc_ensembl_annotations[transcript_cluster_id]
try: ens_transcripts = ti.EnsemblTranscripts()
except Exception: ens_transcripts = []
ens_geneids={}; ens_geneid_ls=[]
for gene in ti.EnsemblGeneIDs():
if gene in ens_translation_db and gene not in ensembl_gene_db: ### This is the old lift over method where an old Ens in the annotation file is translated to a more recent ID
gene = ens_translation_db[gene] ### translate the old to new Ensembl
if gene in ensembl_gene_db:
try: ens_geneids[gene]+=1
except Exception: ens_geneids[gene]=1
ens_match+=1
if len(ti.EnsemblGeneIDs())>0:
for transcript in ens_transcripts:
try:
gene = ens_trans_gene_db[transcript]
try: ens_geneids[gene]+=1
except Exception: ens_geneids[gene]=1
ens_match+=1
except Exception: pass
#if transcript_cluster_id == 'TC01000626.hg.1':
#print ti.EnsemblGeneIDs(), ti.EnsemblTranscripts(); sys.exit()
if transcript_cluster_id in ens_translation_db:
gene = ens_translation_db[transcript_cluster_id] ### translate the TC to new Ensembl
if gene in ensembl_gene_db:
try: ens_geneids[gene]+=1
except Exception: ens_geneids[gene]=1
ens_match+=1
for symbol in ti.Symbol():
if symbol in ensembl_symbol_db:
for ei in ensembl_symbol_db[symbol]:
#print [symbol, ei.GeneID(),ti.Chromosome()]; sys.exit()
#print [ei.Chromosome(),ti.Chromosome(),ei.Strand(),ti.Strand()];kill
if ti.Chromosome() != 'not-assinged': ### Valid for HJAY and MJAY arrays
if ei.Chromosome() == ti.Chromosome() and ei.Strand() == ti.Strand():
try: ens_geneids[ei.GeneID()]+=1
except Exception: ens_geneids[ei.GeneID()]=1
sym_match+=1
else: ### Valid for GLU arrays (since Affymetrix decided to change the file formats and content!!!)
try: ens_geneids[ei.GeneID()]+=1
except Exception: ens_geneids[ei.GeneID()]=1
sym_match+=1
for gene in ens_geneids: ens_geneid_ls.append([ens_geneids[gene],gene]) ### Rank these to get Ensembls that have symbol and ID evidence where possible
ens_geneid_ls.sort(); ens_geneid_ls.reverse()
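        ### e.g. ens_geneid_ls = [[3,'ENSG_A'],[1,'ENSG_B']] (illustrative) ranks
        ### 'ENSG_A' first, since it accumulated both symbol- and ID-based evidence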
if len(ens_geneid_ls)>0:
ens_geneid = ens_geneid_ls[0][1] ### Best evidence gene association
try: ensembl_associations[transcript_cluster_id].append(ens_geneid)
except KeyError: ensembl_associations[transcript_cluster_id] = [ens_geneid]
ei = ensembl_gene_db[ens_geneid]
gene_annotation_db[transcript_cluster_id]=[ei.Description(),ens_geneid,ei.Symbol(),'']
else:
missing_count+=1
#if missing_count<20: print transcript_cluster_id,ti.EnsemblGeneIDs(),ti.Symbol()
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
### Add TCs based on genomic overlap positions with Ensembl genes
coordinates_to_annotate={}; added_genes=0
for transcript_cluster_id in tc_ensembl_annotations:
ti = tc_ensembl_annotations[transcript_cluster_id]
if ti.Strand() == '-1': strand = '-'
else: strand = '+'
try: coordinates_to_annotate[ti.Chromosome(),strand].append([(ti.Start(),ti.End()),ti])
except Exception: coordinates_to_annotate[ti.Chromosome(),strand] = [[(ti.Start(),ti.End()),ti]]
import RNASeq
limit = 0
RNASeq.alignCoordinatesToGeneExternal(species,coordinates_to_annotate)
for transcript_cluster_id in tc_ensembl_annotations:
ti = tc_ensembl_annotations[transcript_cluster_id]
if transcript_cluster_id not in gene_annotation_db:
try:
if 'ENSG' in ti.GeneID() or 'ENSMUSG' in ti.GeneID():
gene_annotation_db[transcript_cluster_id]=['',ti.GeneID(),ti.Symbol()[0],'']
try: ensembl_associations[transcript_cluster_id].append(ti.GeneID())
except KeyError: ensembl_associations[transcript_cluster_id] = [ti.GeneID()]
added_genes+=1
except Exception:
                    if limit < 0: ### set to 20 to print examples - the missing IDs are typically retired Ensembl IDs
print transcript_cluster_id
limit+=1
else:
try:
if 'ENSG' in ti.GeneID() or 'ENSMUSG' in ti.GeneID(): added_genes+=1
except Exception: pass
        print added_genes, 'transcript clusters annotated by genomic overlap with Ensembl genes'
exportDB(primary_gene_annotation_export,gene_annotation_db)
ensembl_associations = eliminate_redundant_dict_values(ensembl_associations)
print ens_match, 'direct Ensembl-Ensembl gene mapping and', sym_match, 'indirect Symbol-chromosome mapping'
print len(tc_ensembl_annotations)-len(ensembl_associations),'unmapped transcript clusters'
print len(gene_annotation_db), 'transcripts with associated valid Ensembl gene IDs'#; sys.exit()
"""
u=0 ### print transcript clusters without gene IDs
for i in tc_ensembl_annotations:
if i not in ensembl_associations:
if u<15:
print i, tc_ensembl_annotations[i].EnsemblGeneID(); u+=1
"""
exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type) ###Use these For LinkEST program
return ensembl_associations
def pickShortestExonIDDiff(exon_to_exon):
if '|' in exon_to_exon: delim = '|'
else: delim = '///'
if delim not in exon_to_exon:
try: five_exon,three_exon=string.split(exon_to_exon,'_to_')
except Exception: print [exon_to_exon];sys.exit()
return five_exon,three_exon
else:
exon_comps = string.split(exon_to_exon,delim); diff_list=[]
for exon_comp in exon_comps:
five_exon,three_exon=string.split(exon_comp,'_to_')
try: diff=abs(int(five_exon[5:])-int(three_exon[5:]))
except Exception: diff=abs(int(five_exon[4:-3])-int(three_exon[4:-3])) #hta
diff_list.append((diff,[five_exon,three_exon]))
diff_list.sort()
return diff_list[0][1]
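### A usage sketch (hypothetical IDs) for the function above: of two '_to_'
### comparisons, the pair whose numeric exon-ID suffixes are closest is returned.
### With IDs shaped like 'PSR0100001', int(id[5:]) extracts the numeric suffix.
def _sketch_pickShortestExonIDDiff():
    result = pickShortestExonIDDiff('PSR0100001_to_PSR0100005|PSR0100002_to_PSR0100003')
    ### the first comparison differs by 4, the second by 1 -> the second pair wins
    assert result == ['PSR0100002','PSR0100003']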
def importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type):
print 'Importing junction array sequence mapping'
export_dir = 'AltDatabase/'+species+'/'+array_type+'/'
filename = export_dir+string.lower(species[0])+'jay.r2.annotation_map'
if 'lue' in specific_array_type: ### Grab an hGlue specific annotation file
filename = export_dir+string.lower(species[0])+'Glue_3_0_v1.annotation_map_dt.v3.hg18.csv'
elif 'HTA' in specific_array_type:
try: psr_probeset_db = importGenericReverse(export_dir+'probeset-psr.txt')
except Exception:
psr_probeset_db = importGenericReverse(export_dir+species+'_probeset-psr.txt')
if extraction_type == 'Ensembl':
filename = export_dir+'HTA-2_0.na33.hg19.transcript.csv'
type = 'TranscriptCluster'
else:
filename = export_dir+'HTA-2_0.na33.hg19.probeset.csv'
#filename = export_dir+'test.csv'
elif 'MTA' in specific_array_type:
try: psr_probeset_db = importGenericReverse(export_dir+'probeset-psr.txt')
except Exception:
psr_probeset_db = importGenericReverse(export_dir+species+'_probeset-psr.txt')
if extraction_type == 'Ensembl':
filename = export_dir+'MTA-1_0.na35.mm10.transcript.csv'
type = 'TranscriptCluster'
else:
filename = export_dir+'MTA-1_0.na35.mm10.probeset.csv'
#filename = export_dir+'test.csv'
    verifyFile(filename,array_type) ### Checks whether the file is installed and, if not, downloads it or raises an error
fn=filepath(filename)
if extraction_type == 'sequence':
probeset_junctionseq_export = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-junction-seq.txt'
fn2=filepath(probeset_junctionseq_export); dw = open(fn2,'w'); print "Exporting",probeset_junctionseq_export
probeset_translation_db={}; x=0; tc=0; j=0; p=0; k=0; tc_db=(); transcript_cluster_count={}; transcript_cluster_count2={}
global probeset_db; global junction_comp_db; junction_comp_db={}; global junction_alinging_probesets
ps_db={}; jc_db={}; left_ec={}; right_ec={}; psr_ec={}; probeset_db={}; junction_alinging_probesets={}; nonconstitutive_junctions={}
header_row = True; ct=0; probeset_types = {}
for line in open(fn,'r').xreadlines():
#if 'PSR170003198' in line:
if '.csv' in filename:
data = altCleanUpLine(line)
if '"' in data :
t = string.split(data,'"')
new_string = t[0]
for i in t[1:-1]:
if len(i)>1:
if ',' in i[1:-1]: ### can have legitimate commas on the outsides
i = string.replace(i,",",'|')
new_string+=i
new_string+=t[-1]
t = string.split(new_string[:-1],',')
else: t = string.split(data,',')
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
if x<5 or '#' == data[0]: x+=1
elif x>2:
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
if extraction_type != 'Ensembl': type = 'PSR'
### This is the probeset file which has a different structure and up-to-date genomic coordinates (as of hg19)
if header_row:
psr_index = t.index('probeset_id'); si = t.index('strand'); sqi = t.index('seqname')
starti = t.index('start'); endi = t.index('stop')
if type == 'TranscriptCluster':
ai = t.index('mrna_assignment'); gi = t.index('gene_assignment')
else:
pti = t.index('probeset_type'); jse = t.index('junction_start_edge'); jee = t.index('junction_stop_edge')
jsi = t.index('junction_sequence'); tci = t.index('transcript_cluster_id'); xi = t.index('exon_id')
csi = t.index('constituitive')
header_row = False
else:
#probeset_type = t[pti]
#try: probeset_types[probeset_type]+=1
#except Exception: probeset_types[probeset_type]=1
#if probeset_type == 'main':
psr = t[psr_index]
try: probeset = psr_probeset_db[psr]
except Exception: probeset = psr
if type == 'TranscriptCluster':
transcript_annotation = t[ai]; gene_annotation = t[gi]
chr = t[sqi]
strand = t[si]
symbols=[]; ens_transcripts = []; geneids=[]
gene_annotation = string.split(gene_annotation,' /// ')
for ga in gene_annotation:
try: ga = string.split(ga,' // '); symbols = ga[1]
except Exception: pass
if 'ENSG' in transcript_annotation or 'ENSMUSG' in transcript_annotation:
if 'ENSG' in transcript_annotation: delim = 'ENSG'
if 'ENSMUSG' in transcript_annotation: delim = 'ENSMUSG'
try:
ta = string.split(transcript_annotation,delim)[1]
try: ta = string.split(ta,' ')[0]
except Exception: pass
geneids=delim+ta
except Exception: pass
if 'ENST' in transcript_annotation or 'ENSMUST' in transcript_annotation:
if 'ENST' in transcript_annotation: delim = 'ENST'
if 'ENSMUST' in transcript_annotation: delim = 'ENSMUST'
try:
gene_annotation = string.split(transcript_annotation,delim)[1]
try: gene_annotation = string.split(gene_annotation,' ')[0]
except Exception: pass
ens_transcripts = [delim+gene_annotation]
except Exception: pass
#if probeset == 'TC04000084.hg.1':
#print transcript_annotation;sys.exit()
#print probeset, strand, geneids, ens_transcripts, symbols
probeset = probeset[:-2] # remove the .1 or .0 at the end - doesn't match to the probeset annotations
psri = PSRAnnotation(psr,probeset,'',probeset,strand,geneids,symbols,'','','',type)
psri.setChromosome(chr)
try: psri.setStart(int(t[starti]))
except Exception: continue
psri.setEnd(int(t[endi]))
psri.setTranscripts(ens_transcripts)
elif 'JUC' in psr:
type = 'Junction'
exon_cluster = string.split(string.split(t[xi],'///')[0],'_to_') ### grab the first exonIDs
constitutive = t[csi]
transcript_cluster = string.split(t[tci],'///')[0]
chr = t[sqi]; strand = t[si]
if constitutive == 'Non-Constituitive': nonconstitutive_junctions[probeset]=[]
try: five_exon,three_exon = pickShortestExonIDDiff(t[xi])
except Exception:
five_exon,three_exon = exon_cluster
five_EC,three_EC = five_exon,three_exon ### NOT SURE THIS IS CORRECT
junction_alinging_probesets[probeset] = [five_exon,five_exon], [three_exon,three_exon]
seq = t[jsi]
seq = string.lower(string.replace(seq,'|',''))
psri = PSRAnnotation(psr,probeset,'',transcript_cluster,strand,'','',exon_cluster,constitutive,seq,type)
try: junction_start = int(t[jse]); junction_end = int(t[jee])
except Exception: print t;sys.exit()
if '-' in strand: junction_start, junction_end = junction_end,junction_start
exon1s = junction_start-16; exon1e = junction_start
exon2s = junction_end; exon2e = junction_end+16
if '-' in strand:
junction_start, junction_end = junction_end,junction_start
exon1s = junction_start+16; exon1e = junction_start
exon2s = junction_end; exon2e = junction_end-16
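                        ### The 16-nt windows flanking each junction edge approximate the
                        ### exon sequence on either side (the offsets flip on the minus strand)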
psri.setTranscriptClusterID(transcript_cluster)
psri.setChromosome(chr)
#print chr, transcript_cluster, exon1s, exon2s, seq, five_EC, three_EC;sys.exit()
elif 'PSR' in psr:
type = 'Exon'
exon_cluster = string.split(t[xi],'///')[0] ### grab the first exonIDs
constitutive = t[csi]
transcript_cluster = string.split(t[tci],'///')[0]
chr = t[sqi]; strand = t[si]
if constitutive == 'Non-Constituitive': nonconstitutive_junctions[probeset]=[]
five_EC,three_EC = five_exon,three_exon ### NOT SURE THIS IS CORRECT
psri = PSRAnnotation(psr,probeset,'',transcript_cluster,strand,'','',exon_cluster,constitutive,'',type)
exon_start = int(t[starti]); exon_end = int(t[endi])
if '-' in strand: exon_start, exon_end = exon_end,exon_start
psri.setTranscriptClusterID(transcript_cluster)
psri.setChromosome(chr)
elif len(t)==15: ###Transcript Cluster ID Lines
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count = t
type = 'TranscriptCluster'; seq=''; exon_cluster=''; constitutive=''
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif 'TC' in t[0]: ###Transcript ID Lines - Glue array
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count = t[:15]
type = 'TranscriptCluster'; seq=''; exon_cluster=''; constitutive=''; ucsclink = ''
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==28:###Junction ID Lines
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, junction_number, original_seq, exon_to_exon, observed_speculative, strand, five_PSR, three_PSR, five_EC, three_EC, Rel_5EC, Rel_3EC, constitutive, blat_junction = t
type = 'Junction'; exon_cluster = [five_EC,three_EC]
if constitutive == 'alternative': nonconstitutive_junctions[probeset]=[]
five_exon,three_exon = pickShortestExonIDDiff(exon_to_exon)
junction_alinging_probesets[probeset] = [five_PSR,five_exon], [three_PSR,three_exon]; seq = blat_junction
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==31 and len(t[29])>0: ###Junction ID Lines - Glue array
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, original_seq, genomic_position, exon_to_exon, observed_speculative, exon_cluster, constitutive, five_PSR, tr_hits, three_PSR, percent_tr_hits, five_EC, loc_5_3, three_EC, Rel_5EC, Rel_3EC, blat_junction = t
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
type = 'Junction'; exon_cluster = [five_EC,three_EC]; ucsclink = ''
if constitutive == 'alternative': nonconstitutive_junctions[probeset]=[]
five_exon,three_exon = pickShortestExonIDDiff(exon_to_exon)
junction_alinging_probesets[probeset] = [five_PSR,five_exon], [three_PSR,three_exon]; seq = blat_junction
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==24: ###Probeset ID Lines
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, PSR_region, genome_pos2, strand, exon_cluster, constitutive, TR_hits, percent_TR_hits, location_5to3_percent,seq = t
type = 'Exon'
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==31 and len(t[29])== 0:##Probeset ID Lines - Glue array
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, original_seq, genomic_position, exon_to_exon, observed_speculative, exon_cluster, constitutive, five_PSR, tr_hits, three_PSR, percent_tr_hits, five_EC, loc_5_3, three_EC, Rel_5EC, Rel_3EC, seq = t
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
type = 'Exon'; ucsclink = ''
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
else:
#if k<40 and len(t)>5: print len(t),t; k+=1
type = 'null'
#print len(t),data;sys.exit()
### Exon clusters are equivalent to exon blocks in this schema and can be matched between junctions and exons
#if x < 20: print len(t),t[0],type
store = 'yes'
if extraction_type == 'Ensembl':
if type != 'TranscriptCluster': store = 'no'
elif extraction_type == 'sequence':
store = 'no'
if type == 'Exon' or type == 'Junction':
transcript_cluster_count[psri.TranscriptCluster()]=[]
if psri.TranscriptCluster() in ensembl_associations:
ens_geneid = ensembl_associations[psri.TranscriptCluster()][0]
critical_junctions=''
if type == 'Junction':
dw.write(probeset+'\t'+psri.JunctionSequence()+'\t\t\n')
seq = psri.JunctionSequences()[0]; exon_id = probeset+'|5'
seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
try: probeset_db[ens_geneid].append(seq_data)
except Exception: probeset_db[ens_geneid] = [seq_data]
try: seq_data.setExonStart(exon1s); seq_data.setExonStop(exon1e) ### HTA
except Exception: pass
seq = psri.JunctionSequences()[1]; exon_id = probeset+'|3'
seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
try: seq_data.setExonStart(exon2s); seq_data.setExonStop(exon2e) ### HTA
except Exception: pass
try: probeset_db[ens_geneid].append(seq_data)
except Exception: probeset_db[ens_geneid] = [seq_data]
transcript_cluster_count2[psri.TranscriptCluster()]=[]
elif type == 'Exon':
dw.write(probeset+'\t'+psri.Sequence()+'\t\t\n')
seq = psri.Sequence(); exon_id = probeset
seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
try: seq_data.setExonStart(exon_start); seq_data.setExonStop(exon_end) ### HTA
except Exception: pass
try: probeset_db[ens_geneid].append(seq_data)
except Exception: probeset_db[ens_geneid] = [seq_data]
transcript_cluster_count2[psri.TranscriptCluster()]=[]
if store == 'yes':
#if probeset in probeset_db: print probeset; sys.exit()
try: probeset_db[probeset] = psri
except Exception: null=[]
if type == 'TranscriptCluster':
tc+=1
if type == 'Junction':
#print 'here';sys.exit()
j+=1
if extraction_type == 'comparisons':
### Store the left exon-cluster and right exon-cluster for each junction
try: left_ec[five_EC].append(probeset)
except KeyError: left_ec[five_EC]=[probeset]
try: right_ec[three_EC].append(probeset)
except KeyError: right_ec[three_EC]=[probeset]
if type == 'Exon':
p+=1
if extraction_type == 'comparisons':
try: psr_ec[exon_cluster].append(probeset)
except KeyError: psr_ec[exon_cluster]=[probeset]
"""
print 'psid',psid; print 'probeset',probeset; print 'ucsclink',ucsclink
print 'transcript_cluster',transcript_cluster; print 'transcripts',transcripts
print 'geneids',geneids; print 'symbols',symbols; print 'seq',seq; kill"""
x+=1
print 'TCs:',tc, 'Junctions:',j, 'Exons:',p, 'Total:',x; #sys.exit()
#print 'JUC0900017373',probeset_db['JUC0900017373'].Sequence()
#print 'JUC0900017385',probeset_db['JUC0900017385'].Sequence();kill
if extraction_type == 'sequence':
dw.close()
print len(probeset_db),'Entries exported from Junction annotation file'
return probeset_db
if extraction_type == 'Ensembl':
print len(probeset_db),'Entries exported from Junction annotation file'
return probeset_db
if extraction_type == 'comparisons':
global junction_inclusion_db; global ensembl_exon_db; global exon_gene_db
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,'original')
ensembl_exon_db,exon_gene_db = JunctionArrayEnsemblRules.importAndReformatEnsemblJunctionAnnotations(species,array_type,nonconstitutive_junctions)
global failed_db; failed_db={}
global passed_db; passed_db={}
print len(junction_inclusion_db)
identifyCompetitiveJunctions(right_ec,"3'")
identifyCompetitiveJunctions(left_ec,"5'")
print 'len(passed_db)',len(passed_db),'len(failed_db)',len(failed_db)
print 'len(junction_inclusion_db)',len(junction_inclusion_db)
exportUpdatedJunctionComps(species,array_type)
def exportUpdatedJunctionComps(species,array_type,searchChr=None):
db_version = unique.getCurrentGeneDatabaseVersion() ### Only need this since we are exporting to root_dir for RNASeq
if array_type == 'RNASeq': species,root_dir=species
else: root_dir = ''
lines_exported=0
if searchChr !=None:
probeset_junction_export = root_dir+'AltDatabase/'+db_version+'/'+ species + '/'+array_type+'/comps/'+ species + '_junction_comps_updated.'+searchChr+'.txt'
else:
probeset_junction_export = root_dir+'AltDatabase/'+db_version+'/'+ species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
if array_type == 'RNASeq':
data,status = RNASeq.AppendOrWrite(probeset_junction_export) ### Creates a new file or appends if already existing (import is chromosome by chromosome)
else:
data = export.ExportFile(probeset_junction_export); status = 'not found'
if array_type != 'RNASeq': print "Exporting",probeset_junction_export
if status == 'not found':
title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
data.write(title)
for i in junction_inclusion_db:
critical_exons=[]
for ji in junction_inclusion_db[i]:
#value = string.join([ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()],'\t')+'\n'
### Combine all critical exons for a probeset pair
critical_exons.append(ji.CriticalExon())
critical_exons = unique.unique(critical_exons); critical_exons = string.join(critical_exons,'|'); ji.setCriticalExons(critical_exons); lines_exported+=1
data.write(ji.OutputLine())
data.close()
if array_type != 'RNASeq':
print lines_exported,'for',probeset_junction_export
def identifyCompetitiveJunctions(exon_cluster_db,junction_type):
"""To identify critical exons (e.g., the alternatively spliced exon sequence for two alternative exon-junctions), this script:
1) Finds pairs of junctions that contain the same 5' or 3' exon-cluster (genomic overlapping transcript exons)
2) Determines which junction has exons that are closes in genomic space, between the pair of junctions (based on exon-cluster ID number or exon ID)
3) Selects the non-common exon and stores the junction sequence for that exon
4) Selects any exon probeset ID that is annotated as overlapping with the critical exon
The greatest assumption with this method is that the critical exon is choosen based on the numerical ID in the exon-cluster or exon ID (when the exon-clusters
between the two junctions are the same). For example looked at, this appears to be true (e.g., two exons that make up a junction have a difference of 1 in their ID),
but this may not always be the case. Ideally, this method is more extensively tested by evaluating junction and exon sequences mapped to genomic coordinates
and AltAnalyze exon block and region coordinates to verify the critical exon selection."""
passed=0; failed=0; already_added=0
if junction_type == "5'": index = 1
else: index = 0
for ec in exon_cluster_db:
if len(exon_cluster_db[ec])>1:
junction_comps={} ### Calculate all possible pairwise-junction comparisons
for junction1 in exon_cluster_db[ec]:
for junction2 in exon_cluster_db[ec]:
if junction1 != junction2: temp = [junction1,junction2]; temp.sort(); junction_comps[tuple(temp)]=[]
for (junction1,junction2) in junction_comps:
store_data = 'no'
if (junction1,junction2) in junction_inclusion_db or (junction2,junction1) in junction_inclusion_db:
already_added+=1
elif junction1 in ensembl_exon_db and junction2 in ensembl_exon_db: ### Thus, these are mapped to the genome
ed1 = ensembl_exon_db[junction1]; ed2 = ensembl_exon_db[junction2]
ensembl_gene_id = ed1.GeneID()
try: diff1 = ed1.JunctionDistance(); diff2 = ed2.JunctionDistance()
except Exception:
print junction1,junction2
psri1 = probeset_db[junction1]
psri2 = probeset_db[junction2]
print psri1.Probeset(), psri2.Probeset()
kill
### Using the ranked exon-cluster IDs
psri1 = probeset_db[junction1]; exon1a = psri1.ExternalExonClusterIDs()[0]; exon1b = psri1.ExternalExonClusterIDs()[-1]
psri2 = probeset_db[junction2]; exon2a = psri2.ExternalExonClusterIDs()[0]; exon2b = psri2.ExternalExonClusterIDs()[-1]
try: diffX1 = abs(int(exon1a[5:])-int(exon1b[5:])); diffX2 = abs(int(exon2a[5:])-int(exon2b[5:]))
except Exception:
diffX1 = abs(int(exon1a[4:-4])-int(exon1b[4:-4])); diffX2 = abs(int(exon2a[4:-4])-int(exon2b[4:-4]))
junction1_exon_id = ed1.ExonID(); junction2_exon_id = ed2.ExonID()
if diffX1==0 or diffX2==0: null=[] ### splicing occurs within a single exon-cluster
elif diff1<diff2: ### Thus the first junction contains the critical exon
#critical_exon_seq = psri1.JunctionSequences()[index] ### if left most exon in junction is common, then choose the most proximal right exon as critical
incl_junction_probeset = junction1; excl_junction_probeset = junction2
incl_junction_id = junction1_exon_id; excl_junction_id = junction2_exon_id
incl_exon_probeset,incl_exon_id = junction_alinging_probesets[junction1][index]
store_data = 'yes'
elif diff2<diff1:
incl_junction_probeset = junction2; excl_junction_probeset = junction1
incl_junction_id = junction2_exon_id; excl_junction_id = junction1_exon_id
incl_exon_probeset,incl_exon_id = junction_alinging_probesets[junction2][index]
store_data = 'yes'
if store_data == 'yes':
critical_exon_id = string.split(incl_junction_id,'-')[index]; critical_exon_id = string.replace(critical_exon_id,'.','-')
if incl_exon_probeset in ensembl_exon_db:
if (excl_junction_probeset,incl_exon_probeset) in junction_inclusion_db or (incl_exon_probeset,excl_junction_probeset) in junction_inclusion_db:
already_added+=1
else:
critical_exon_id = ensembl_exon_db[incl_exon_probeset]
ji=JunctionArrayEnsemblRules.JunctionInformation(ensembl_gene_id,critical_exon_id,excl_junction_id,critical_exon_id,excl_junction_probeset,incl_exon_probeset,'Affymetrix')
try: junction_inclusion_db[excl_junction_probeset,incl_exon_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_exon_probeset] = [ji]
#value = string.join([ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()],'\t')+'\n'
#print ji.OutputLine();kill
#print [[critical_exon_id,junction2,ed2.ExonID(), ed1.JunctionCoordinates(), ed2.JunctionCoordinates(), diff1,diff2]]
passed+=1
passed_db[junction1,junction2]=[]
ji=JunctionArrayEnsemblRules.JunctionInformation(ensembl_gene_id,critical_exon_id,excl_junction_id,incl_junction_id,excl_junction_probeset,incl_junction_probeset,'Affymetrix')
#print ensembl_gene_id,critical_exon_id,excl_junction_id,incl_junction_id,excl_junction_probeset,incl_junction_probeset;kill
#print [critical_exon_id,junction1,junction2,ed1.ExonID(),ed2.ExonID(), ed1.JunctionCoordinates(), ed2.JunctionCoordinates(), diff1,diff2]
                        try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
print 'already_added:',already_added,'passed:',passed,'failed:',failed
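### A minimal sketch (illustrative, not used by the pipeline) of the pairwise
### enumeration inside identifyCompetitiveJunctions: junctions sharing an
### exon-cluster are paired once each, as order-independent sorted tuples.
def _sketch_pairwise_junction_comps(junctions):
    junction_comps = {}
    for junction1 in junctions:
        for junction2 in junctions:
            if junction1 != junction2:
                temp = [junction1,junction2]; temp.sort(); junction_comps[tuple(temp)] = []
    return junction_comps
### e.g. sorted(_sketch_pairwise_junction_comps(['JUC1','JUC2','JUC3']).keys()) ->
### [('JUC1','JUC2'), ('JUC1','JUC3'), ('JUC2','JUC3')]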
def identifyJunctionComps(species,array_type,specific_array_type):
    ### At this point, probeset-to-exon-region associations are built for exon and junction probesets along with critical exons and reciprocal junctions
    ### Now, associate the reciprocal junctions/critical exons (Ensembl/UCSC based) with junction array probesets and export to the junction Hs_junction_comps.txt file
JunctionArrayEnsemblRules.getJunctionComparisonsFromExport(species,array_type)
### Next, do this for reciprocal junctions predicted directly from Affymetrix's annotations
extraction_type = 'comparisons'
tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
inferJunctionComps(species,array_type)
def filterForCriticalExons(species,array_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
importForFiltering(species,array_type,filename,'exclude_junction_psrs')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset_microRNAs_any.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset_microRNAs_multiple.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-protein-annotations-exoncomp.txt'
importForFiltering(species,array_type,filename,'exclude_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-domain-annotations-exoncomp.txt'
importForFiltering(species,array_type,filename,'exclude_critical_exon_ids')
def importForFiltering(species,array_type,filename,export_type):
fn=filepath(filename); dbase={}; x = 0
print 'Filtering:',filename
dbase['filename'] = filename
    ###Import the annotation file contents to be filtered
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line); splitup = 'no'
        if x == 0: x=1; dbase['title'] = line; x+=1 ###Store the header/title line
if x !=0:
key = string.split(data,'\t')[0]
if ':' in key:
old_key = key
key = string.split(key,':')[1]
line = string.replace(line,old_key,key)
if '|' in key: ### Get rid of |5 or |3
line = string.replace(line,key,key[:-2])
if export_type == 'exclude_critical_exon_ids': splitup = 'yes'
if splitup == 'no':
try: dbase[key].append(line)
except Exception: dbase[key] = [line]
#print len(dbase)
filterExistingFiles(species,array_type,dbase,export_type)
def importGenericAppend(filename,key_db):
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericReverse(filename):
db={}
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
db[t[-1]] = t[0]
return db
def importGenericAppendDBList(filename,key_db):
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
return key_db
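### Summary of the three generic importers above (illustrative): importGenericAppend
### maps key -> remaining columns, importGenericReverse maps the last column back to
### the first, and importGenericAppendDBList accumulates key -> [second column, ...].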
def combineExonJunctionAnnotations(species,array_type):
###Currently used for RNASeq databases to minimize the number of files supplied to the user
collapseSequenceFiles(species,array_type)
overRideJunctionEntriesWithExons(species,array_type)
collapseDomainAlignmentFiles(species,array_type,species+'_Ensembl_domain_aligning_probesets.txt')
collapseDomainAlignmentFiles(species,array_type,species+'_Ensembl_indirect_domain_aligning_probesets.txt')
def collapseDomainAlignmentFiles(species,array_type,filename):
original_filename = 'AltDatabase/'+species+'/'+array_type+'/'+filename
domain_db = importGenericAppendDBList(original_filename,{})
filename = 'AltDatabase/'+species+'/'+array_type+'/junction/'+filename
domain_db = importGenericAppendDBList(filename,domain_db); del domain_db['Probeset']
header = 'Probeset\tInterPro-Description\n'
exportGenericList(domain_db,original_filename,header)
def exportGenericList(db,filename,header):
data_export = export.ExportFile(filename)
if len(header)>0: data_export.write(header)
print 'Re-writing',filename
for key in db:
for i in db[key]: data_export.write(string.join([key]+[i],'\t')+'\n')
data_export.close()
def collapseSequenceFiles(species,array_type):
original_filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_exoncomp.txt'
seq_db = importGenericAppend(original_filename,{})
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/SEQUENCE-protein-dbase_exoncomp.txt'
try: seq_db = importGenericAppend(filename,seq_db)
except Exception: print 'SEQUENCE-protein-dbase_exoncomp.txt - exon version not found'
filename = 'AltDatabase/'+species+'/'+array_type+'/junction/SEQUENCE-protein-dbase_exoncomp.txt'
try: seq_db = importGenericAppend(filename,seq_db)
except Exception: print 'SEQUENCE-protein-dbase_exoncomp.txt - junction version not found'
exportGeneric(seq_db,original_filename,[])
def exportGeneric(db,filename,header):
data_export = export.ExportFile(filename)
if len(header)>0: data_export.write(header)
print 'Re-writing',filename
for key in db:
data_export.write(string.join([key]+db[key],'\t')+'\n')
data_export.close()
def overRideJunctionEntriesWithExons(species,array_type):
filename1 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-protein-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-protein-annotations-exoncomp.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-domain-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-domain-annotations-exoncomp.txt'
overRideExistingEntries(filename1,filename2)
def overRideExonEntriesWithJunctions(species,array_type):
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/'+species+'_Ensembl_domain_aligning_probesets.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets-filtered.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets-filtered.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-protein-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-protein-annotations-exoncomp-filtered.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-domain-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-domain-annotations-exoncomp-filtered.txt'
overRideExistingEntries(filename1,filename2)
def overRideExistingEntries(file_include,file_exclude):
### Imports two files and over-rides entries in one with another
### These are the filtered entries to replace
fn=filepath(file_include); dbase_include={}; x = 0
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
key = string.split(data,'\t')[0]
try: dbase_include[key].append(line)
except Exception: dbase_include[key] = [line]
x+=1
print x;title=''
fn=filepath(file_exclude); dbase_exclude={}; x = 0
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1; title = line; x+=1
if x != 0:
key = string.split(data,'\t')[0]
try: dbase_exclude[key].append(line)
except Exception: dbase_exclude[key] = [line]
x+=1
print x
count=0
for key in dbase_exclude: count+=1
print file_exclude, count
count=0
for key in dbase_include:
dbase_exclude[key] = dbase_include[key]
count+=1
print file_exclude, count
dbase_exclude = eliminate_redundant_dict_values(dbase_exclude)
data_export = export.ExportFile(file_exclude)
count=0
print 'Re-writing',file_exclude,'with junction aligned entries.'
try: data_export.write(title)
except Exception: null=[] ### Occurs when no alternative isoforms present for this genome
for key in dbase_exclude:
for line in dbase_exclude[key]:
data_export.write(line); count+=1
data_export.close()
print count
def clearObjectsFromMemory(db_to_clear):
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
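### Usage sketch: clearObjectsFromMemory(some_db) empties the passed dictionary in
### place so its values become eligible for garbage collection; the nested del
### fallbacks handle dictionaries keyed by tuples as well as plain lists.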
class JunctionInformationSimple:
def __init__(self,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset):
self._critical_exon = critical_exon; self.excl_junction = excl_junction; self.incl_junction = incl_junction
self.excl_probeset = excl_probeset; self.incl_probeset = incl_probeset
#self.critical_exon_sets = string.split(critical_exon,'|')
self.critical_exon_sets = [critical_exon]
def CriticalExon(self):
ce = str(self._critical_exon)
if '-' in ce: ce = string.replace(ce,'-','.')
return ce
def CriticalExonList(self):
critical_exon_str = self.CriticalExon()
critical_exons = string.split(critical_exon_str,'|')
return critical_exons
def setCriticalExons(self,critical_exons): self._critical_exon = critical_exons
def setCriticalExonSets(self,critical_exon_sets): self.critical_exon_sets = critical_exon_sets
def setInclusionProbeset(self,incl_probeset): self.incl_probeset = incl_probeset
def setInclusionJunction(self,incl_junction): self.incl_junction = incl_junction
def CriticalExonSets(self): return self.critical_exon_sets ### list of critical exons (can select any or all for functional analysis)
def InclusionJunction(self): return self.incl_junction
def ExclusionJunction(self): return self.excl_junction
def InclusionProbeset(self): return self.incl_probeset
def ExclusionProbeset(self): return self.excl_probeset
def setNovelEvent(self,novel_event): self._novel_event = novel_event
def NovelEvent(self): return self._novel_event
def setInclusionLookup(self,incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self): return self.incl_junction_probeset
def __repr__(self): return self.GeneID()
def getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir):
alt_junction_db={}; critical_exon_db={}; critical_agglomerated={}; exon_inclusion_agglom={}; incl_junctions_agglom={}; exon_dbase={}
exon_inclusion_db={}; comparisons=0
    ### Previously, JunctionArrayEnsemblRules.reimportJunctionComps (see above) was used for this import, but it proved too memory intensive
if array_type == 'junction': root_dir=''
filename = root_dir+'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
fn=filepath(filename); junction_inclusion_db={}; x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); junction_info=[]
gene,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset,source = string.split(data,'\t')
if source == 'AltAnalyze': novel_exon = 'known'
else: novel_exon = 'novel'
"""
if gene == 'ENSG00000140464':
a=0; b=0
if excl_probeset in exon_db: a = 1
if incl_probeset in exon_db: b = 1
#print incl_probeset, a, b, excl_probeset, critical_exon
"""
try:
null=exon_db[excl_probeset] ### Exclusion needs to be present
if incl_probeset in exon_db:
ji = JunctionInformationSimple(critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset)
junction_info.append(ji)
ji.setNovelEvent(novel_exon) ### Indicates known or novel splicing event
#print [ji.InclusionProbeset(),ji.ExclusionProbeset()]
if array_type == 'RNASeq':
critical_exons = string.split(critical_exon,'|')
for ce in critical_exons:
critical_exon_probeset = gene+':'+ce
ji=JunctionInformationSimple(ce,excl_junction,ce,excl_probeset,critical_exon_probeset)
junction_info.append(ji); ji.setInclusionLookup(incl_probeset) ### Use this ID to get protein and domain annotations
ji.setNovelEvent(novel_exon) ### Indicates known or novel splicing event
"""
if gene == 'ENSG00000140464' and ce == 'E5.2':
a=0; b=0
if ji.ExclusionProbeset() in exon_db: a = 1
if ji.InclusionProbeset() in exon_db: b = 1
print [ji.InclusionProbeset()],a,b;kill
"""
#print [ji.InclusionProbeset(),ji.ExclusionProbeset()];kill
for ji in junction_info:
try:
geneid=exon_db[ji.InclusionProbeset()].GeneID() ### This inclusion needs to be present
if agglomerate_inclusion_probesets == 'yes':
exclProbeset = ji.ExclusionProbeset(); inclProbeset = ji.InclusionProbeset()
exon_inclusion_agglom[exclProbeset] = ji ### Just need one example
try: critical_exon_db[exclProbeset].append(ji.CriticalExon())
except Exception: critical_exon_db[exclProbeset]=[ji.CriticalExon()]
try: critical_agglomerated[exclProbeset]+=ji.CriticalExonList()
except Exception: critical_agglomerated[exclProbeset]=ji.CriticalExonList()
try: incl_junctions_agglom[exclProbeset].append(ji.InclusionJunction())
except Exception: incl_junctions_agglom[exclProbeset]=[ji.InclusionJunction()]
try: exon_inclusion_db[exclProbeset].append(inclProbeset)
except Exception: exon_inclusion_db[exclProbeset]=[inclProbeset]
else:
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
comparisons+=1
except KeyError: null=[]
except KeyError: null=[]
#print comparisons, "Junction comparisons in database"
if agglomerate_inclusion_probesets == 'yes':
alt_junction_agglom={}
for excl in exon_inclusion_db:
ji = exon_inclusion_agglom[excl]
ed = exon_db[ji.InclusionProbeset()]; ed1 = ed
geneid = ed.GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
critical_exon_sets = unique.unique(critical_exon_db[excl])
incl_probesets = unique.unique(exon_inclusion_db[excl])
exon_inclusion_db[excl] = incl_probesets
critical_exons = unique.unique(critical_agglomerated[excl]); critical_exons.sort()
incl_junctions = unique.unique(incl_junctions_agglom[excl]); incl_junctions.sort()
ji.setCriticalExons(string.join(critical_exons,'|'))
ji.setInclusionJunction(string.join(incl_junctions,'|'))
ji.setInclusionProbeset(string.join(incl_probesets,'|'))
ji.setCriticalExonSets(critical_exon_sets)
            ed1.setProbeset(string.replace(incl_probesets[0],'@',':')) ### Must be the first entry so it matches on re-import of a filtered list for exon_db (full ID, not abbreviated)
#if '|' in ji.InclusionProbeset(): print ji.InclusionProbeset(), string.replace(incl_probesets[0],'@',':');sys.exit()
#print string.join(incl_probesets,'|'),ji.InclusionProbeset();kill
### Create new agglomerated inclusion probeset entry
#ed1.setProbeset(ji.InclusionProbeset()) ### Agglomerated probesets
ed1.setDisplayExonID(string.join(incl_junctions,'|'))
exon_db[ji.InclusionProbeset()] = ed1 ### Agglomerated probesets
#if 'ENSMUSG00000032497:E23.1-E24.1' in ji.InclusionProbeset():
#print ji.InclusionProbeset();sys.exit()
#if '198878' in ji.InclusionProbeset(): print ji.InclusionProbeset(),excl
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
del exon_inclusion_agglom
critical_exon_db={}
if array_type == 'RNASeq':
### Need to remove the @ from the IDs
for e in exon_inclusion_db:
incl_probesets=[]
for i in exon_inclusion_db[e]:
incl_probesets.append(string.replace(i,'@',':'))
exon_inclusion_db[e] = incl_probesets
#clearObjectsFromMemory(junction_inclusion_db); junction_inclusion_db=[]
critical_agglomerated=[];exon_inclusion_agglom={}; incl_junctions_agglom={}
""" Not used for junction or RNASeq platforms
if array_type == 'AltMouse':
for probeset in array_id_db:
try:
geneid = exon_db[probeset].GeneID()
exons = exon_db[probeset].ExonID()
exon_dbase[geneid,exons] = probeset
except Exception: null=[]
"""
#print '--------------------------------------------'
### Eliminate redundant entries
objects_to_delete=[]
for geneid in alt_junction_db:
junction_temp_db={}; junction_temp_ls=[]
for ji in alt_junction_db[geneid]: ### Redundant entries can be present
id = ji.ExclusionProbeset(),ji.InclusionProbeset()
if id in junction_temp_db: objects_to_delete.append(ji)
else: junction_temp_db[id]=ji
for i in junction_temp_db:
ji = junction_temp_db[i]; junction_temp_ls.append(ji)
alt_junction_db[geneid]=junction_temp_ls
"""
for ji in alt_junction_db['ENSG00000140464']:
print ji.ExclusionProbeset(), ji.InclusionProbeset(), ji.CriticalExon(), ji.ExclusionJunction(), ji.InclusionJunction()
kill
"""
clearObjectsFromMemory(objects_to_delete); objects_to_delete=[]
return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
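### Added note (not in the original source): both getPutativeSpliceEvents variants return
### (alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db). exon_dbase is
### only filled by the disabled AltMouse block above, so it stays empty for junction/RNASeq runs.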
def getPutativeSpliceEventsOriginal(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir):
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps((species,root_dir),array_type,'updated')
alt_junction_db={}; critical_exon_db={}; critical_agglomerated={}; exon_inclusion_agglom={}; incl_junctions_agglom={}; exon_dbase={}
exon_inclusion_db={}; comparisons=0
for i in junction_inclusion_db:
critical_exons=[]
for ji in junction_inclusion_db[i]:
#ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()
if agglomerate_inclusion_probesets == 'yes':
if ji.InclusionProbeset() in exon_db and ji.ExclusionProbeset() in exon_db:
if array_type == 'RNASeq':
exclProbeset = ji.ExclusionProbeset(); inclProbeset=JunctionArrayEnsemblRules.formatID(ji.InclusionProbeset())
else: exclProbeset = ji.ExclusionProbeset(); inclProbeset = ji.InclusionProbeset()
exon_inclusion_agglom[exclProbeset] = ji ### Just need one example
try: critical_exon_db[exclProbeset].append(ji.CriticalExon())
except Exception: critical_exon_db[exclProbeset]=[ji.CriticalExon()]
try: critical_agglomerated[exclProbeset]+=ji.CriticalExonList()
except Exception: critical_agglomerated[exclProbeset]=ji.CriticalExonList()
try: incl_junctions_agglom[exclProbeset].append(ji.InclusionJunction())
except Exception: incl_junctions_agglom[exclProbeset]=[ji.InclusionJunction()]
try: exon_inclusion_db[exclProbeset].append(inclProbeset)
except Exception: exon_inclusion_db[exclProbeset]=[inclProbeset]
else:
try:
geneid = exon_db[ji.InclusionProbeset()].GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
comparisons+=1
except Exception: geneid = ji.GeneID() ### If not in the local user datasets (don't think these genes need to be added)
#print comparisons, "Junction comparisons in database"
if agglomerate_inclusion_probesets == 'yes':
alt_junction_agglom={}
for excl in exon_inclusion_db:
ji = exon_inclusion_agglom[excl]
ed = exon_db[ji.InclusionProbeset()]; ed1 = ed
geneid = ed.GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
critical_exon_sets = unique.unique(critical_exon_db[excl])
incl_probesets = unique.unique(exon_inclusion_db[excl])
exon_inclusion_db[excl] = incl_probesets
critical_exons = unique.unique(critical_agglomerated[excl]); critical_exons.sort()
incl_junctions = unique.unique(incl_junctions_agglom[excl]); incl_junctions.sort()
ji.setCriticalExons(string.join(critical_exons,'|'))
ji.setInclusionJunction(string.join(incl_junctions,'|'))
ji.setInclusionProbeset(string.join(incl_probesets,'|'))
ji.setCriticalExonSets(critical_exon_sets)
ed1.setProbeset(string.replace(incl_probesets[0],'@',':')) ### Must be the first entry, to match on re-import of a filtered list for exon_db (full, not abbreviated)
#if '|' in ji.InclusionProbeset(): print ji.InclusionProbeset(), string.replace(incl_probesets[0],'@',':');sys.exit()
#print string.join(incl_probesets,'|'),ji.InclusionProbeset();kill
### Create new agglomerated inclusion probeset entry
#ed1.setProbeset(ji.InclusionProbeset()) ### Agglomerated probesets
ed1.setDisplayExonID(string.join(incl_junctions,'|'))
exon_db[ji.InclusionProbeset()] = ed1 ### Agglomerated probesets
#if 'ENSMUSG00000032497:E23.1-E24.1' in ji.InclusionProbeset():
#print ji.InclusionProbeset();sys.exit()
#if '198878' in ji.InclusionProbeset(): print ji.InclusionProbeset(),excl
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
del exon_inclusion_agglom
critical_exon_db={}
if array_type == 'RNASeq':
### Need to remove the @ from the IDs
for e in exon_inclusion_db:
incl_probesets=[]
for i in exon_inclusion_db[e]:
incl_probesets.append(string.replace(i,'@',':'))
exon_inclusion_db[e] = incl_probesets
### Eliminate redundant entries
objects_to_delete=[]
for geneid in alt_junction_db:
junction_temp_db={}; junction_temp_ls=[]
for ji in alt_junction_db[geneid]: ### Redundant entries can be present
id = ji.ExclusionProbeset(),ji.InclusionProbeset()
if id in junction_temp_db: objects_to_delete.append(ji)
else: junction_temp_db[id]=ji
for i in junction_temp_db:
ji = junction_temp_db[i]; junction_temp_ls.append(ji)
alt_junction_db[geneid]=junction_temp_ls
clearObjectsFromMemory(objects_to_delete); objects_to_delete=[]
return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def filterExistingFiles(species,array_type,db,export_type):
"""Remove probesets entries (including 5' and 3' junction exons) from the database that don't indicate possible critical exons"""
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'probesets',{})
critical_junction_db = {}; critical_probeset_db={}; crit1={}
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,'updated')
for ids in junction_inclusion_db:
for jd in junction_inclusion_db[ids]:
critical_exon_id = jd.ParentCriticalExon()
critical_id = jd.GeneID()+':'+jd.CriticalExon()
critical_exon_ids = string.split(critical_exon_id,'|')
critical_junction_db[jd.ExclusionProbeset(),jd.InclusionProbeset()]=critical_exon_ids,critical_id
crit1[critical_id]=[]
"""
for id in crit1:
if 'ENSMUSG00000066842' in id: print id
stop
"""
#print len(crit1);
crit2={}
for (pX,probeset) in critical_junction_db:
###Keep only junction probesets that contain possible critical exons
p1 = probeset+'|5'; p2 = probeset+'|3'
c1s,critical_id = critical_junction_db[(pX,probeset)]; proceed = 'no'
#print p1, p2, c1s, critical_id
#for probeset in db: print [probeset];kill
if probeset in ensembl_probeset_db and probeset in db:
critical_probeset_db[probeset,critical_id]=db[probeset]
crit2[probeset]=[]
else:
if p1 in ensembl_probeset_db and p1 in db:
c2s = ensembl_probeset_db[p1]; p = p1
c2s = string.split(c2s,'|')
for c1 in c1s:
if c1 in c2s:
critical_probeset_db[p,critical_id]=db[p]
crit2[probeset]=[]
if p2 in ensembl_probeset_db and p2 in db:
c2s = ensembl_probeset_db[p2]; p = p2
c2s = string.split(c2s,'|')
for c1 in c1s:
if c1 in c2s:
critical_probeset_db[p,critical_id]=db[p]
crit2[probeset]=[]
for probeset in ensembl_probeset_db: ### For non-junction probesets
if '|' not in probeset:
if probeset in db: critical_probeset_db[probeset,probeset]=db[probeset]; crit2[probeset]=[]
critical_probeset_db = eliminate_redundant_dict_values(critical_probeset_db)
print len(crit2),'len(crit2)'
x=0
"""
for probeset in db:
if probeset not in crit2:
x+=1
if x<20: print probeset """
print len(critical_probeset_db),': length of filtered db', len(db), ': length of db'
"""
for probeset in ensembl_probeset_db:
###Keep only probesets that contain possible critical exons
if '|' in probeset:
if probeset[:-2] in critical_junction_db and probeset in db:
critical_probeset_db[probeset[:-2]]=db[probeset]
elif probeset in db: critical_probeset_db[probeset]=db[probeset] """
"""
for id in critical_probeset_db:
if 'ENSMUSG00000066842' in id[1]: print id
stop
"""
if export_type == 'exclude_junction_psrs':
critical_probeset_db['title'] = db['title']
critical_probeset_db['filename'] = db['filename']
exportFiltered(critical_probeset_db)
else:
for p in db:
if '|' not in p: probeset = p
else: probeset = p[:-2]
if probeset not in crit2:
### Add back any junction probesets that do not have a critical exon component
critical_probeset_db[probeset,probeset]=db[p]
if export_type == 'exclude_critical_exon_ids':
critical_probeset_db2={}
for (p,cid) in critical_probeset_db:
if ':' in cid or '|' in p:
critical_probeset_db2[p[:-2],p[:-2]] = critical_probeset_db[(p,cid)]
else: critical_probeset_db2[p,p] = critical_probeset_db[(p,cid)]
critical_probeset_db = critical_probeset_db2
critical_probeset_db['title'] = db['title']
critical_probeset_db['filename'] = db['filename']
exportFiltered(critical_probeset_db)
########### Code originally designed for AltMouseA array database builds (adapted for use with Mouse and Human Junction Arrays)
def filterExpressionData(filename1,filename2,pre_filtered_db,constitutive_db):
fn2=filepath(filename2)
probeset_translation_db={}
###Import probeset number/id relationships (note: forced to use numeric IDs for Plier/Exact analysis)
if analysis_method != 'rma':
for line in open(fn2,'r').xreadlines():
data = cleanUpLine(line)
probeset_number,probeset_id = string.split(data,'\t')
probeset_translation_db[probeset_number]=probeset_id
fn=filepath(filename1)
exp_dbase={}; d = 0; x = 0
###Import expression data (non-log space)
try:
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
if data[0] != '#' and x == 1: ###Grab expression values
tab_delimited_data = string.split(data,'\t')
z = len(tab_delimited_data)
probeset = tab_delimited_data[0]
if analysis_method == 'rma': exp_vals = tab_delimited_data[1:]
else: exp_vals = convertToLog2(tab_delimited_data[1:])
###Filter results based on whether a sufficient number of samples were detected as Present
if probeset in pre_filtered_db:
if probeset in probeset_translation_db: original_probeset_id = probeset_translation_db[probeset]
else: original_probeset_id = probeset ###When p-values are generated outside of Plier
if original_probeset_id in constitutive_db:
percent_present = pre_filtered_db[probeset]
if percent_present > 0.99: exp_dbase[original_probeset_id] = exp_vals
#else: print percent_present,original_probeset_id; kill
else: exp_dbase[original_probeset_id] = exp_vals
elif data[0] != '#' and x == 0: ###Grab labels
array_names = []
tab_delimited_data = string.split(data,'\t')
for entry in tab_delimited_data: array_names.append(entry)
x += 1
except IOError: pass ### keep whatever was imported if the file can't be read further
print len(exp_dbase),"probesets imported with expression values"
###If the arrayid column header is missing, account for this
if len(array_names) == z:
array_names = array_names[1:]
null,filename = string.split(filename1,'\\')
filtered_exp_export = 'R_expression_raw_data\\'+filename[:-4]+'-filtered.txt'
fn=filepath(filtered_exp_export); data = open(fn,'w'); title = 'probeset_id'
for array in array_names: title = title +'\t'+ array
data.write(title+'\n')
for probeset in exp_dbase:
exp_vals = probeset
for exp_val in exp_dbase[probeset]:
exp_vals = exp_vals +'\t'+ str(exp_val)
data.write(exp_vals+'\n')
data.close()
#return filtered_exp_export
def convertToLog2(data_list):
new_list=[]
for item in data_list:
new_list.append(math.log(float(item)+1,2))
return new_list
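### Illustrative sketch (added; hypothetical values): the transform above is log2(x+1),
### so raw intensities of 0, 1 and 3 map onto an exact log2 scale:
"""
>>> convertToLog2(['0','1','3'])
[0.0, 1.0, 2.0]
"""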
def getAnnotations(filename,p,Species,Analysis_Method,constitutive_db):
global species; species = Species
global analysis_method; analysis_method = Analysis_Method
array_type = 'AltMouse'
filtered_junctions_list = ExonArray.getFilteredExons(filename,p)
probe_id_translation_file = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-probeset_translation.txt'
filtered_exp_export_file = filterExpressionData(filename,probe_id_translation_file,filtered_junctions_list,constitutive_db)
return filtered_exp_export_file
def altCleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
return data
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
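### Illustrative sketch (added; hypothetical input): cleanUpLine() strips newline and
### carriage-return characters, the literal '\c' sequence and double quotes:
"""
>>> cleanUpLine('probeset_id\t"value"\r\n')
'probeset_id\tvalue'
"""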
def importGeneric(filename):
verifyFile(filename,None)
fn=filepath(filename); key_db = {}; x = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(t[1:]) == 1:
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
else: key_db[t[0]] = t[1:]
return key_db
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
ls = unique.unique(database[key])
ls.sort()
db1[key] = ls
return db1
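### Illustrative sketch (added), assuming unique.unique() de-duplicates a list:
"""
>>> eliminate_redundant_dict_values({'gene1': ['E2','E1','E2']})
{'gene1': ['E1', 'E2']}
"""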
def importAnnotateCriticalExonSequences(species,array_type):
ensembl_associations = importArrayAnnotations(species,array_type)
filename = 'AltDatabase/'+species+'/'+array_type+'/'+ array_type+'_critical-exon-seq.txt'
critical_exon_seq_db = importCriticalExonSeq(filename,array_type,ensembl_associations)
return critical_exon_seq_db
def importArrayAnnotations(species,array_type):
primary_gene_annotation_file = 'AltDatabase/'+species +'/'+ array_type +'/'+ array_type+ '_gene_annotations.txt'
ensembl_array_gene_annotation_file = 'AltDatabase/'+species+'/'+ array_type + '/'+array_type+ '-Ensembl.txt'
ensembl_annotations = 'AltDatabase/ensembl/'+ species + '/'+species+ '_Ensembl-annotations_simple.txt'
verifyFile(primary_gene_annotation_file,array_type)
verifyFile(ensembl_array_gene_annotation_file,array_type)
verifyFile(ensembl_annotations,array_type)
array_gene_annotations = importGeneric(primary_gene_annotation_file)
ensembl_associations = importGeneric(ensembl_array_gene_annotation_file)
ensembl_annotation_db = importGeneric(ensembl_annotations)
ensembl_symbol_db={}
for ens_geneid in ensembl_annotation_db:
description, symbol = ensembl_annotation_db[ens_geneid]
#print symbol;klll
if len(symbol)>0:
try: ensembl_symbol_db[symbol].append(ens_geneid)
except KeyError: ensembl_symbol_db[symbol] =[ens_geneid]
### Update array Ensembl annotations
for array_geneid in array_gene_annotations:
t = array_gene_annotations[array_geneid]; description=t[0];entrez=t[1];symbol=t[2]
if symbol in ensembl_symbol_db:
ens_geneids = ensembl_symbol_db[symbol]
for ens_geneid in ens_geneids:
try: ensembl_associations[array_geneid].append(ens_geneid)
except KeyError: ensembl_associations[array_geneid] = [ens_geneid]
ensembl_associations = eliminate_redundant_dict_values(ensembl_associations)
exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type) ###Use these For LinkEST program
return ensembl_associations
def exportDB(filename,db):
fn=filepath(filename); data = open(fn,'w')
for key in db:
try: values = string.join([key]+db[key],'\t')+'\n'; data.write(values)
except Exception: print key,db[key];sys.exit()
data.close()
def exportFiltered(db):
filename = db['filename']; title = db['title']
filename = string.replace(filename,'.txt','-filtered.txt')
print 'Writing',filename
del db['filename']; del db['title']
fn=filepath(filename); data = open(fn,'w'); data.write(title)
for (old,new) in db:
for line in db[(old,new)]: ### Replace the old ID with the new one
if old not in line and '|' in old:
old = old[:-2]
if ('miR-'+new) in line: ### Occurs when the probeset is a number found in the miRNA name
line = string.replace(line,'miR-'+new,'miR-'+old)
line = string.replace(line,old,new); data.write(line)
data.close()
def exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type):
annotation_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl_relationships.txt'
fn=filepath(annotation_db_filename); data = open(fn,'w')
title = ['ArrayGeneID','Ensembl']; title = string.join(title,'\t')+'\n'
data.write(title)
for array_geneid in ensembl_associations:
for ens_geneid in ensembl_associations[array_geneid]:
values = [array_geneid,ens_geneid]; values = string.join(values,'\t')+'\n'; data.write(values)
data.close()
def exportCriticalExonLocations(species,array_type,critical_exon_seq_db):
location_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations.txt'
fn=filepath(location_db_filename); data = open(fn,'w')
title = ['Affygene','ExonID','Ensembl','start','stop','gene-start','gene-stop','ExonSeq']; title = string.join(title,'\t')+'\n'
data.write(title)
for ens_geneid in critical_exon_seq_db:
for cd in critical_exon_seq_db[ens_geneid]:
try:
values = [cd.ArrayGeneID(),cd.ExonID(),ens_geneid,cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()]
values = string.join(values,'\t')+'\n'
data.write(values)
except AttributeError:
#print cd.ArrayGeneID(), cd.ExonID()
#print cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()
#sys.exit()
pass
data.close()
class ExonSeqData:
def __init__(self,exon,array_geneid,probeset_id,critical_junctions,critical_exon_seq):
self._exon = exon; self._array_geneid = array_geneid; self._critical_junctions = critical_junctions
self._critical_exon_seq = critical_exon_seq; self._probeset_id = probeset_id
def ProbesetID(self): return self._probeset_id
def ArrayGeneID(self): return self._array_geneid
def ExonID(self): return self._exon
def CriticalJunctions(self): return self._critical_junctions
def ExonSeq(self): return string.upper(self._critical_exon_seq)
def setExonStart(self,exon_start):
try: self._exon_start = self._exon_start ### If it already is set from the input file, keep it
except Exception: self._exon_start = exon_start
def setExonStop(self,exon_stop):
try: self._exon_stop = self._exon_stop ### If it already is set from the input file, keep it
except Exception: self._exon_stop = exon_stop
def setGeneStart(self,gene_start): self._gene_start = gene_start
def setGeneStop(self,gene_stop): self._gene_stop = gene_stop
def ExonStart(self): return str(self._exon_start)
def ExonStop(self): return str(self._exon_stop)
def GeneStart(self): return str(self._gene_start)
def GeneStop(self): return str(self._gene_stop)
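### Hypothetical usage (added; placeholder IDs) showing the accessor pattern the exporters rely on:
"""
esd = ExonSeqData('E5.1','AF001','AF001:E5.1','E4.1-E5.1|E5.1-E6.1','gattaca')
esd.setExonStart(1200); esd.setExonStop(1260) ### stored only if not already set from the input file
print esd.ProbesetID(), esd.ExonSeq() ### -> AF001:E5.1 GATTACA
"""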
def importCriticalExonSeq(filename,array_type,ensembl_associations):
verifyFile(filename,array_type)
fn=filepath(filename); key_db = {}; x = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
arraygeneid_exon,critical_junctions,critical_exon_seq = string.split(data,'\t')
if len(critical_exon_seq)>5:
array_geneid, exon = string.split(arraygeneid_exon,':')
if array_geneid in ensembl_associations:
ens_geneids = ensembl_associations[array_geneid]
for ens_geneid in ens_geneids:
seq_data = ExonSeqData(exon,array_geneid,arraygeneid_exon,critical_junctions,critical_exon_seq)
try: key_db[ens_geneid].append(seq_data)
except KeyError: key_db[ens_geneid] = [seq_data]
return key_db
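### Added note: each data row is expected as 'ArrayGeneID:ExonID<tab>critical_junctions<tab>critical_exon_seq',
### matching the split above; rows whose sequence is five characters or shorter are skipped.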
def updateCriticalExonSequences(array_type, filename,ensembl_probeset_db):
exon_seq_db_filename = filename[:-4]+'_updated.txt'
fn=filepath(exon_seq_db_filename); data = open(fn,'w')
critical_exon_seq_db={}
for ens_gene in ensembl_probeset_db:
for probe_data in ensembl_probeset_db[ens_gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: critical_exon_seq_db[probeset_id] = ed.ExonSeq()
except AttributeError: null=[] ### Occurs when no sequence data is associated with exon (probesets without exon associations)
ensembl_probeset_db=[]; key_db = {}; x = 0
if array_type == 'AltMouse':
fn1=filepath(filename)
verifyFile(filename,array_type)
for line in open(fn1,'rU').xreadlines():
line_data = cleanUpLine(line)
if x == 0: x = 1; data.write(line)
else:
arraygeneid_exon,critical_junctions,critical_exon_seq = string.split(line_data,'\t')
if arraygeneid_exon in critical_exon_seq_db:
critical_exon_seq = critical_exon_seq_db[arraygeneid_exon]
values = [arraygeneid_exon,critical_junctions,critical_exon_seq]
values = string.join(values,'\t')+'\n'
data.write(values)
else: data.write(line)
elif array_type == 'junction':
### We don't need any of the additional information used for AltMouse arrays
for probeset in critical_exon_seq_db:
critical_exon_seq = critical_exon_seq_db[probeset]
if ':' in probeset:
probeset = string.split(probeset,':')[1]
values = [probeset,'',critical_exon_seq]
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
print exon_seq_db_filename, 'exported....'
def inferJunctionComps(species,array_type,searchChr=None):
if len(array_type) == 3:
### This indicates that the ensembl_probeset_db is already included (a 3-item tuple was passed in place of array_type)
array_type,ensembl_probeset_db,root_dir = array_type
comps_type = ''
else:
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'junction-regions',{})
comps_type = 'updated'; root_dir = ''
if array_type != 'RNASeq':
print "Import junction probeset region IDs for",species
print "Preparing region IDs for analysis of possible reciprocal junctions"
putative_as_junction_db={}; probeset_junction_db={}; common_exon_blocks_exon={}; common_exon_blocks_intron={}; count=0
for gene in ensembl_probeset_db:
for (probeset,regionid) in ensembl_probeset_db[gene]:
regionids = string.split(regionid,'|')
for regionid in regionids:
if '-' in regionid:
novel_5p=False; novel_3p=False
if 'I' in regionid: exons_type = 'exon-intron'
else: exons_type = 'exons'
exon_5prime_original, exon_3prime_original = string.split(regionid,'-')
exon_5prime = string.split(exon_5prime_original,'.')
if '_' in exon_5prime[1]:
exon_5prime[1] = float(string.replace(exon_5prime[1],'_','.'))
novel_5p=True
else: exon_5prime[1] = int(exon_5prime[1])
e1a3 = (int(exon_5prime[0][1:]),int(exon_5prime[1])) ### The first is an int for the region - since it hybs early
e1a5 = (int(exon_5prime[0][1:]),exon_5prime[1])
e1 = e1a3, e1a5
exon_3prime = string.split(exon_3prime_original,'.')
if '_' in exon_3prime[1]:
exon_3prime[1] = float(string.replace(exon_3prime[1],'_','.'))
novel_3p=True
else:
try: exon_3prime[1] = int(exon_3prime[1])
except Exception: print exon_3prime;kill
e2a3 = (int(exon_3prime[0][1:]),exon_3prime[1])
e2a5 = (int(exon_3prime[0][1:]),int(exon_3prime[1])) ### The second is an int for the region - since it hybs late
e2 = e2a3, e2a5
if exons_type == 'exons':
if novel_5p and novel_3p:
pass ### Ignore junctions where both the 5' and 3' splice sites are novel -> likely false positives
### If you include these with novel junction discovery in TopHat, you can get a huge memory issue in compareJunctions
else:
count+=1
try: putative_as_junction_db[gene].append((e1,e2))
except Exception: putative_as_junction_db[gene] = [(e1,e2)]
### This matches the recorded junction ID from EnsemblImport.compareJunctions()
try: probeset_junction_db[gene,(e1a5,e2a3)].append(probeset)
except Exception: probeset_junction_db[gene,(e1a5,e2a3)] = [probeset]
### Defines exon-intron and exon-exon reciprocal junctions based on shared exon blocks
block = e1a3[0]; side = 'left'
try: common_exon_blocks_exon[side,gene,block].append([regionid,probeset])
except KeyError: common_exon_blocks_exon[side,gene,block] = [[regionid,probeset]]
block = e2a3[0]; side = 'right'
try: common_exon_blocks_exon[side,gene,block].append([regionid,probeset])
except KeyError: common_exon_blocks_exon[side,gene,block] = [[regionid,probeset]]
else:
### Defines exon-intron and exon-exon reciprocal junctions based on shared exon blocks
### In 2.0.8 we expanded the search criterion here so that each side and exon-block are searched for matching junctions (needed for confirmatory novel exons)
if 'I' in exon_5prime or 'I' in exon_5prime[0]: ### Can be a list with the first object being the exon annotation
block = e2a3[0]; side = 'right'; critical_intron = exon_5prime_original
alt_block = e1a3[0]; alt_side = 'left'
else:
block = e1a3[0]; side = 'left'; critical_intron = exon_3prime_original
alt_block = e2a3[0]; alt_side = 'right'
#if gene == 'ENSG00000112695':
#print critical_intron,regionid,probeset, exon_5prime_original, exon_3prime_original, exon_5prime
try: common_exon_blocks_intron[side,gene,block].append([regionid,probeset,critical_intron])
except KeyError: common_exon_blocks_intron[side,gene,block] = [[regionid,probeset,critical_intron]]
### Below added in 2.0.8 to accommodate a broader comparison of reciprocal splice junctions
try: common_exon_blocks_intron[alt_side,gene,alt_block].append([regionid,probeset,critical_intron])
except KeyError: common_exon_blocks_intron[alt_side,gene,alt_block] = [[regionid,probeset,critical_intron]]
if array_type != 'RNASeq':
print count, 'probed junctions being compared to identify putative reciprocal junction comparisons'
critical_exon_db, critical_gene_junction_db = EnsemblImport.compareJunctions(species,putative_as_junction_db,{},rootdir=root_dir, searchChr=searchChr)
if array_type != 'RNASeq':
print len(critical_exon_db),'genes with alternative reciprocal junctions pairs found'
global junction_inclusion_db; count=0; redundant=0; junction_annotations={}; critical_exon_annotations={}
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,(comps_type,ensembl_probeset_db))
for gene in critical_exon_db:
for sd in critical_exon_db[gene]:
junction_pairs = getJunctionPairs(sd.Junctions())
"""
if len(junction_pairs)>1 and len(sd.CriticalExonRegion())>1:
print sd.Junctions()
print sd.CriticalExonRegion();kill"""
for (junction1,junction2) in junction_pairs:
critical_exon = sd.CriticalExonRegion()
excl_junction,incl_junction = determineExclIncl(junction1,junction2,critical_exon)
incl_junction_probeset = probeset_junction_db[gene,incl_junction][0]
excl_junction_probeset = probeset_junction_db[gene,excl_junction][0]
source = 'Inferred'
incl_junction=formatJunctions(incl_junction)
excl_junction=formatJunctions(excl_junction)
critical_exon=string.replace(formatJunctions(critical_exon),'-','|'); count+=1
ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
#if gene == 'ENSG00000112695':# and 'I' in critical_exon:
#print critical_exon,'\t', incl_junction,'\t',excl_junction_probeset,'\t',incl_junction_probeset
if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db:
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except KeyError: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
junction_str = string.join([excl_junction,incl_junction],'|')
#splice_event_str = string.join(sd.SpliceType(),'|')
try: junction_annotations[ji.InclusionProbeset()].append((junction_str,sd.SpliceType()))
except KeyError: junction_annotations[ji.InclusionProbeset()] = [(junction_str,sd.SpliceType())]
try: junction_annotations[ji.ExclusionProbeset()].append((junction_str,sd.SpliceType()))
except KeyError: junction_annotations[ji.ExclusionProbeset()] = [(junction_str,sd.SpliceType())]
critical_exons = string.split(critical_exon,'|')
for critical_exon in critical_exons:
try: critical_exon_annotations[gene+':'+critical_exon].append((junction_str,sd.SpliceType()))
except KeyError: critical_exon_annotations[gene+':'+critical_exon] = [(junction_str,sd.SpliceType())]
else: redundant+=1
if array_type != 'RNASeq':
print count, 'Inferred junctions identified with',redundant, 'redundant.'
### Compare exon and intron blocks for intron-aligning junctions
junction_inclusion_db = annotateNovelIntronSplicingEvents(common_exon_blocks_intron,common_exon_blocks_exon,junction_inclusion_db)
if len(root_dir)>0: exportUpdatedJunctionComps((species,root_dir),array_type,searchChr=searchChr)
else: exportUpdatedJunctionComps(species,array_type)
clearObjectsFromMemory(junction_inclusion_db); junction_inclusion_db=[]
if array_type == 'RNASeq':
### return these annotations for RNASeq analyses
return junction_annotations,critical_exon_annotations
def annotateNovelIntronSplicingEvents(common_exon_blocks_intron,common_exon_blocks_exon,junction_inclusion_db):
### Add exon-intron, exon-exon reciprocal junctions determined based on a common block exon (same side of the junction)
new_intron_events=0
for key in common_exon_blocks_intron:
(side,gene,block) = key; source='Inferred-Intron'
if key in common_exon_blocks_exon:
for (excl_junction,excl_junction_probeset) in common_exon_blocks_exon[key]:
for (incl_junction,incl_junction_probeset,critical_intron) in common_exon_blocks_intron[key]:
#if gene == 'ENSG00000112695':# and 'E2.9-E3.1' in excl_junction_probeset:
#print critical_intron,'\t', incl_junction,'\t',excl_junction_probeset,'\t',incl_junction_probeset,'\t',side,'\t',gene,block
ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_intron,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db:
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
new_intron_events+=1
#print new_intron_events, 'novel intron-splicing events added to database'
"""
### While the below code seemed like a good idea, the current state of RNA-seq alignment tools produced a ridiculous number of intron-intron junctions (usually in the same intron)
### Without supporting data (e.g., other junctions bridging these intron junctions to a validated exon), we must assume these junctions are not associated with the aligning gene
new_intron_events=0 ### Compare Intron blocks to each other
for key in common_exon_blocks_intron:
(side,gene,block) = key; source='Inferred-Intron'
for (excl_junction,excl_junction_probeset,critical_intron1) in common_exon_blocks_intron[key]:
for (incl_junction,incl_junction_probeset,critical_intron2) in common_exon_blocks_intron[key]:
if (excl_junction,excl_junction_probeset) != (incl_junction,incl_junction_probeset): ### If comparing entries in the same list, don't compare a single entry to itself
ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_intron1+'|'+critical_intron2,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db and (incl_junction_probeset,excl_junction_probeset) not in junction_inclusion_db:
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
new_intron_events+=1
"""
#print new_intron_events, 'novel intron-splicing events added to database'
return junction_inclusion_db
def determineExclIncl(junction1,junction2,critical_exons):
#((3, 2), (6, 1))
for critical_exon in critical_exons:
if critical_exon in junction1: incl_junction = junction1; excl_junction = junction2
if critical_exon in junction2: incl_junction = junction2; excl_junction = junction1
try: return excl_junction,incl_junction
except Exception:
print critical_exons
print junction1
print junction2
print 'Warning... Unknown error. Contact AltAnalyze support for assistance.'
sys.exit()
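### Illustrative sketch (added; hypothetical region tuples): the junction containing the
### critical exon region is returned as the inclusion junction, the other as the exclusion:
"""
>>> determineExclIncl(((3,2),(6,1)), ((4,2),(6,1)), [(4,2)])
(((3, 2), (6, 1)), ((4, 2), (6, 1)))
"""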
def formatJunctions(junction):
#((3, 2), (6, 1))
exons_to_join=[]
for i in junction:
exons_to_join.append('E'+str(i[0])+'.'+string.replace(str(i[1]),'.','_'))
junction_str = string.join(exons_to_join,'-')
return junction_str
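### Illustrative sketch (added): region tuples become dot-delimited exon IDs; novel
### sub-regions (floats) keep an underscore in place of the decimal point:
"""
>>> formatJunctions(((3, 2), (6, 1)))
'E3.2-E6.1'
>>> formatJunctions(((5, 1.5), (6, 1)))
'E5.1_5-E6.1'
"""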
def getJunctionPairs(junctions):
### Although the pairs of junctions (exclusion 1st, inclusion 2nd) are given, need to separate out the pairs to report reciprocal junctions
# (((3, 2), (6, 1)), ((4, 2), (6, 1)), ((3, 2), (6, 1)), ((4, 2), (6, 1))))
count = 0; pairs=[]; pairs_db={}
for junction in junctions:
count +=1; pairs.append(junction)
if count==2: pairs_db[tuple(pairs)]=[]; count = 0; pairs=[]
return pairs_db
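### Illustrative sketch (added): consecutive junctions are paired off two at a time:
"""
>>> getJunctionPairs([((3,2),(6,1)), ((4,2),(6,1))])
{(((3, 2), (6, 1)), ((4, 2), (6, 1))): []}
"""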
def getJunctionExonLocations(species,array_type,specific_array_type):
global ensembl_associations
ensembl_associations = importJunctionArrayAnnotations(species,array_type,specific_array_type)
extraction_type = 'sequence'
exon_seq_db=importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
exportImportedProbesetLocations(species,array_type,exon_seq_db,ensembl_associations)
getLocations(species,array_type,exon_seq_db)
def exportImportedProbesetLocations(species,array_type,critical_exon_seq_db,ensembl_associations):
location_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations-original.txt'
fn=filepath(location_db_filename); data = open(fn,'w')
title = ['Affygene','ExonID','Ensembl','start','stop','gene-start','gene-stop','ExonSeq']; title = string.join(title,'\t')+'\n'
data.write(title)
for ens_geneid in critical_exon_seq_db:
for cd in critical_exon_seq_db[ens_geneid]:
try:
values = [cd.ArrayGeneID(),cd.ExonID(),ens_geneid,cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()]
values = string.join(values,'\t')+'\n'
data.write(values)
except AttributeError: null = []
data.close()
def identifyCriticalExonLocations(species,array_type):
critical_exon_seq_db = importAnnotateCriticalExonSequences(species,array_type)
getLocations(species,array_type,critical_exon_seq_db)
def getLocations(species,array_type,critical_exon_seq_db):
analysis_type = 'get_locations'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
critical_exon_seq_db = EnsemblImport.import_sequence_data(gene_seq_filename,critical_exon_seq_db,species,analysis_type)
exportCriticalExonLocations(species,array_type,critical_exon_seq_db)
def reAnnotateCriticalExonSequences(species,array_type):
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'null',{})
#analysis_type = 'get_sequence'
analysis_type = ('region_only','get_sequence') ### Added after EnsMart65
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_probeset_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_probeset_db,species,analysis_type)
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
if array_type == 'AltMouse': verifyFile(critical_exon_file,array_type)
updateCriticalExonSequences(array_type, critical_exon_file, ensembl_probeset_db)
if __name__ == '__main__':
"""Module has methods for annotating Junction associated critical exon sequences with up-to-date genome coordinates and analysis options for
junciton arrays from AnalyzeExpressionDatasets"""
m = 'Mm'; h = 'Hs'; species = h; array_type = 'junction' ###In theory, could be another type of junction or combination array
specific_array_type = 'hGlue'
extraction_type = 'comparisons'
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_LiftOverEnsembl.txt'
verifyFile(filename,array_type+'/'+specific_array_type);sys.exit()
tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type); sys.exit()
combineExonJunctionAnnotations(species,array_type);sys.exit()
filterForCriticalExons(species,array_type)
overRideExonEntriesWithJunctions(species,array_type);sys.exit()
#inferJunctionComps(species,array_type); sys.exit()
identifyJunctionComps(species,array_type,specific_array_type);sys.exit()
filterForCriticalExons(species,array_type);sys.exit()
reAnnotateCriticalExonSequences(species,array_type)
#getJunctionExonLocations(species,array_type,specific_array_type)
sys.exit()
import_dir = '/AltDatabase/exon/'+species; expr_file_dir = 'R_expression_raw_data\exp.altmouse_es-eb.dabg.rma.txt'
dagb_p = 1; Analysis_Method = 'rma'
#identifyCriticalExonLocations(species,array_type)
#JunctionArrayEnsemblRules.getAnnotations(species,array_type)
### Only needs to be run once, to update the original
#reAnnotateCriticalExonSequences(species,array_type); sys.exit()
#getAnnotations(expr_file_dir,dagb_p,Species,Analysis_Method)
### End of build_scripts/JunctionArray.py (AltAnalyze-2.1.3.15)
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import platform
def filepath(filename):
#fn = unique.filepath(filename)
return filename
def read_directory(sub_dir):
dir_list = os.listdir(sub_dir)
return dir_list
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
ls = list(set(database[key]))
ls.sort()
db1[key] = ls
return db1
class GrabFiles:
def setdirectory(self,value):
self.data = value
def display(self):
print self.data
def searchdirectory(self,search_term):
#self.data stores the directory previously assigned via setdirectory()
file_dir,file = getDirectoryFiles(self.data,str(search_term))
if len(file)<1: print search_term,'not found'
return file_dir,file
def getDirectoryFiles(import_dir, search_term):
exact_file = ''; exact_file_dir=''
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
import_dir = filepath(import_dir)
for data in dir_list: #loop through each file in the directory to output results
if (':' in import_dir) or ('/Users/' == import_dir[:7]) or ('Linux' in platform.system()): affy_data_dir = import_dir+'/'+data
else: affy_data_dir = import_dir[1:]+'/'+data
if search_term in affy_data_dir: exact_file_dir = affy_data_dir; exact_file = data
return exact_file_dir,exact_file
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
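### Illustrative sketch (added): makeUnique() handles hashable and unhashable (list) members alike:
"""
>>> makeUnique(['b','a','b'])
['a', 'b']
>>> makeUnique([[1,2],[1,2],[0,3]])
[[0, 3], [1, 2]]
"""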
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def importPGF(dir,species,filename):
fn=filepath(filename); probe_db = {}; x=0
psr_file = dir+'/'+species+'/'+array_type+'/'+species+'_probeset-psr.txt'
psr_file = string.replace(psr_file,'affymetrix/LibraryFiles/','')
try: eo = open(filepath(psr_file),'w')
except Exception: eo = open(filepath(psr_file[1:]),'w')
for line in open(fn,'rU').xreadlines():
if line[0] != '#':
data = cleanUpLine(line); x+=1
t = string.split(data,'\t')
if len(t)==2 or len(t)==3:
if len(t[0])>0:
probeset = t[0]; type = t[1]
eo.write(probeset+'\t'+t[-1]+'\n') ### Used for HTA array where we need to have PSR to probeset IDs
else:
try:
probe = t[2]
#if probeset == '10701621': print probe
try: probe_db[probeset].append(probe)
except KeyError: probe_db[probeset] = [probe]
except Exception: null=[]
eo.close()
new_file = dir+'/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
new_file = string.replace(new_file,'affymetrix/LibraryFiles/','')
headers = 'probeset\t' + 'probe\n'; n=0
try: data = open(filepath(new_file),'w')
except Exception: data = open(filepath(new_file[1:]),'w')
data.write(headers)
for probeset in probe_db:
for probe in probe_db[probeset]:
data.write(probeset+'\t'+probe+'\n'); n+=1
data.close()
print n, 'Entries exported for', new_file
if __name__ == '__main__':
skip_intro = 'yes'
array_type = 'gene'
#array_type = 'exon'
#array_type = 'junction'
array_type = 'gene'
species = 'Mm'
parent_dir = 'AltDatabase/'+species+'/'+array_type+'/library'
parent_dir = '/AltDatabase/affymetrix/LibraryFiles'
e = GrabFiles(); e.setdirectory(parent_dir)
pgf_dir,pgf_file = e.searchdirectory('MoGene-2_0-st.pgf')
importPGF(parent_dir,species,pgf_dir)
### End of build_scripts/ParsePGF.py (AltAnalyze-2.1.3.15)
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains methods for reading the HMDB and storing relationships"""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import export
import time
import update; reload(update)
from import_scripts import OBO_import
import gene_associations
import traceback
############# Common file handling routines #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv": dir_list2.append(entry)
return dir_list2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def lowerSymbolDB(source_to_gene):
source_to_gene2={}
for symbol in source_to_gene:
source_to_gene2[string.lower(symbol)]=source_to_gene[symbol]
return source_to_gene2
def verifyFile(filename):
fn=filepath(filename); file_found = 'yes'
try:
for line in open(fn,'rU').xreadlines():break
except Exception: file_found = 'no'
return file_found
def importSpeciesData():
if program_type == 'GO-Elite': filename = 'Config/species_all.txt' ### species.txt can be cleared during updating
else: filename = 'Config/goelite_species.txt'
x=0
fn=filepath(filename);global species_list; species_list=[]; global species_codes; species_codes={}
global species_names; species_names={}
global species_taxids; species_taxids={}
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t'); abrev=t[0]; species=t[1]
try: taxid = t[2]
except Exception: taxid = None
if x==0: x=1
else:
species_list.append(species)
species_codes[species] = abrev
species_names[abrev] = species
species_taxids[abrev] = taxid
def getSourceData():
filename = 'Config/source_data.txt'; x=0
fn=filepath(filename)
global source_types; source_types={}
global system_codes; system_codes={}
global mod_types; mod_types=[]
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t'); source=t[0]
try: system_code=t[1]
except IndexError: system_code = 'NuLL'
if x==0: x=1
else:
if len(t)>2: ### Therefore, this ID system is a potential MOD
if t[2] == 'MOD': mod_types.append(source)
source_types[source]=system_code
system_codes[system_code] = source ###Used when users include system code data in their input file
############# File download/extraction #############
def downloadPAZARAssocations():
""" This database is no longer available - Will replace with TRRUST """
url = 'http://www.pazar.info/tftargets/tftargets.zip'
print 'Downloading Transcription Factor to Target associations'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/tftargets/','')
return 'raw'
def downloadPAZARAssocationsOld():
""" This works fine, but is redundant with the new zip file that contains all files"""
base_url = 'http://www.pazar.info/tftargets/'
filenames = getPAZARFileNames()
print 'Downloading Transcription Factor to Target associations'
source = 'raw'
r = 4; k = -1
for resource in filenames:
filename = filenames[resource]
url = base_url+filename
start_time = time.time()
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/tftargets/','')
end_time = time.time()
if (end_time-start_time)>3: ### Hence the internet connection is very slow (will take forever to get everything)
downloadPreCompiledPAZAR() ### Just get the compiled symbol data instead
print '...access to source PAZAR files too slow, getting pre-compiled from genmapp.org'
source = 'precompiled'
break
k+=1
if r==k:
k=0
print '*',
print ''
return source
def downloadPreCompiledPAZAR():
""" Downloads the already merged symbol to TF file from PAZAR files """
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/tf-target.txt'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/tftargets/symbol/','')
def downloadAmadeusPredictions():
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/symbol-Metazoan-Amadeus.txt'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/Amadeus/','')
def downloadBioMarkers(output_dir):
ensembl_version = unique.getCurrentGeneDatabaseVersion()
#url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/Hs_exon_tissue-specific_protein_coding.zip'
url = 'http://altanalyze.org/archiveDBs/AltDatabase/updated/'+ensembl_version+'/Hs_LineageProfiler.zip'
print 'Downloading BioMarker associations'
fln,status = update.downloadSuppressPrintOuts(url,output_dir,'')
#url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/Mm_gene_tissue-specific_protein_coding.zip'
url = 'http://altanalyze.org/archiveDBs/AltDatabase/updated/'+ensembl_version+'/Mm_LineageProfiler.zip'
fln,status = update.downloadSuppressPrintOuts(url,output_dir,'')
def downloadKEGGPathways(species):
print "Integrating KEGG associations for "+species
url = 'http://www.genmapp.org/go_elite/Databases/KEGG/'+species+'-KEGG_20110518.zip'
### This is a fixed date resource since KEGG licensed their material after this date
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/KEGG/','')
def downloadDomainAssociations(selected_species):
paths=[]
if selected_species != None: ### Restrict to selected species only
current_species_dirs=selected_species
else:
current_species_dirs = unique.read_directory('/'+database_dir)
for species in current_species_dirs:
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/Domains/'+species+'_Ensembl-Domain.gz'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/Domains/','txt')
if 'Internet' not in status:
paths.append((species,fln))
return paths
def downloadPhenotypeOntologyOBO():
print 'Downloading Phenotype Ontology structure and associations'
url = 'ftp://ftp.informatics.jax.org/pub/reports/MPheno_OBO.ontology'
fln,status = update.downloadSuppressPrintOuts(url,program_dir+'OBO/','')
def downloadPhenotypeOntologyGeneAssociations():
url = 'ftp://ftp.informatics.jax.org/pub/reports/HMD_HumanPhenotype.rpt'
#url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/HMD_HumanPhenotype.rpt'
### Mouse and human gene symbols and gene IDs (use the gene symbols)
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/Pheno/','')
def downloadBioGRIDAssociations():
print 'Downloading BioGRID associations'
url = 'http://thebiogrid.org/downloads/archives/Latest%20Release/BIOGRID-ALL-LATEST.tab2.zip'
fln,status = update.download(url,'BuildDBs/BioGRID/','')
def downloadDrugBankAssociations():
print 'Downloading DrugBank associations'
url = 'http://www.drugbank.ca/system/downloads/current/drugbank.txt.zip'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/DrugBank/','')
def downloadPathwayCommons():
#### See - https://www.pathwaycommons.org/archives/PC2/v11/
print 'Downloading PathwayCommons associations'
url = 'http://www.pathwaycommons.org/pc-snapshot/current-release/gsea/by_species/homo-sapiens-9606-gene-symbol.gmt.zip'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/PathwayCommons/','')
def downloadDiseaseOntologyOBO():
print 'Downloading Disease Ontology structure and associations'
""" Unfortunately, we have to download versions that are not as frequently updated, since RGDs server
reliability is poor """
#url = 'ftp://rgd.mcw.edu/pub/data_release/ontology_obo_files/disease/CTD.obo'
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/CTD.obo'
### Includes congenital and environmental diseases - http://ctdbase.org/detail.go?type=disease&acc=MESH%3aD002318
fln,status = update.downloadSuppressPrintOuts(url,program_dir+'OBO/','')
def downloadDiseaseOntologyGeneAssociations(selected_species):
if selected_species == None: sc = []
else: sc = selected_species
""" Unfortunately, we have to download versions that are not as frequently updated, since RGDs server
reliability is poor """
if 'Hs' in sc or len(sc)==0:
#url = 'ftp://rgd.mcw.edu/pub/data_release/annotated_rgd_objects_by_ontology/homo_genes_do'
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/homo_genes_do'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/Disease/','')
if 'Mm' in sc or len(sc)==0:
#url = 'ftp://rgd.mcw.edu/pub/data_release/annotated_rgd_objects_by_ontology/mus_genes_do'
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/mus_genes_do'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/Disease/','')
if 'Rn' in sc or len(sc)==0:
#url = 'ftp://rgd.mcw.edu/pub/data_release/annotated_rgd_objects_by_ontology/rattus_genes_do'
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/rattus_genes_do'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/Disease/','')
def downloadMiRDatabases(species):
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/'+species+'_microRNA-Ensembl-GOElite_strict.txt'
selected = ['Hs','Mm','Rn'] ### these are simply zipped where the others are not
### These files should be updated on a regular basis
if species in selected:
url = string.replace(url,'.txt','.zip')
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/microRNATargets/','')
else:
### Where strict is too strict
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/'+species+'_microRNA-Ensembl-GOElite_lax.txt'
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/microRNATargets/','')
fln = string.replace(fln,'.zip','.txt')
return fln
def downloadRvistaDatabases(species):
### Source files from http://hazelton.lbl.gov/pub/poliakov/wgrvista_paper/
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/'+species+'_RVista_factors.zip'
### These files should be updated on a regular basis
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/RVista/','')
fln = string.replace(fln,'.zip','.txt')
return fln
def remoteDownloadEnsemblTranscriptAssocations(species):
global program_dir
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze':
program_dir = database_dir
downloadEnsemblTranscriptAssociations(species)
def downloadEnsemblTranscriptAssociations(species):
url = 'http://www.genmapp.org/go_elite/Databases/ExternalSystems/Transcripts/'+species+'/Ensembl-EnsTranscript.txt'
### These files should be updated on a regular basis
fln,status = update.downloadSuppressPrintOuts(url,program_dir+species+'/uid-gene/','')
def downloadGOSlimOBO():
url = 'http://geneontology.org/ontology/subsets/goslim_pir.obo'
#url = 'http://www.geneontology.org/GO_slims/goslim_generic.obo' ### Missing
fln,status = update.downloadSuppressPrintOuts(url,database_dir+'OBO/','')
def importUniProtAnnotations(species_db,force):
base_url = 'http://www.altanalyze.org/archiveDBs/'
uniprot_ensembl_db={}
for species in species_db:
url = base_url+species+'/custom_annotations.txt'
if force=='yes':
fln,status = update.downloadSuppressPrintOuts(url,'BuildDBs/UniProt/'+species+'/','')
else:
fln = 'BuildDBs/UniProt/'+species+'/custom_annotations.txt'
for line in open(fln,'rU').xreadlines():
data = cleanUpLine(line)
try:
ens_gene,compartment,function,symbols,full_name,uniprot_name,uniprot_ids,unigene = string.split(data,'\t')
symbols = string.split(string.replace(symbols,'; Synonyms=',', '),', ')
uniprot_ensembl_db[species,uniprot_name] = ens_gene
species_extension = string.split(uniprot_name,'_')[-1]
full_name = string.split(full_name,';')[0]
if 'Transcription factor' in full_name:
symbols.append(string.split(full_name,'Transcription factor ')[-1]) ### Add this additional synonym to symbols
### Extend this database out to account for weird names in PAZAR
for symbol in symbols:
new_name = string.upper(symbol)+'_'+species_extension
if new_name not in uniprot_ensembl_db: ### NOTE: keys here are (species,name) tuples, so this string-key test is always True
uniprot_ensembl_db[species,symbol+'_'+species_extension] = ens_gene
uniprot_ensembl_db[species,string.upper(symbol)] = ens_gene
except Exception:
None
return uniprot_ensembl_db
############# Import/processing/export #############
def getPAZARFileNames():
""" Filenames are manually and periodically downloaded from: http://www.pazar.info/cgi-bin/downloads_csv.pl"""
fn = filepath('Config/PAZAR_list.txt')
x=0
filenames = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
resource, filename = string.split(data,'\t')
filenames[resource]=filename
return filenames
class TFTargetInfo:
def __init__(self,tf_name,ens_gene,project,pmid,analysis_method):
self.tf_name=tf_name
self.ens_gene=ens_gene
self.project=project
self.pmid=pmid
self.analysis_method=analysis_method
def TFName(self): return self.tf_name
def Ensembl(self): return self.ens_gene
def Project(self):
if self.project[-1]=='_':
return self.project[:-1]
else:
return self.project
def PMID(self): return self.pmid
def AnalysisMethod(self): return self.analysis_method
def __repr__(self): return self.TFName()
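### Hypothetical usage (added; placeholder IDs): trailing underscores are trimmed from
### project names and the object prints as its TF name:
"""
tft = TFTargetInfo('Gata1','ENSG00000000001','Erythroid_','12345678','ChIP-chip')
print tft, tft.Project() ### -> Gata1 Erythroid
"""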
def importPAZARAssociations(force):
pazar_files = unique.read_directory('/BuildDBs/tftargets')
species_db={}
tf_to_target={}
tf_name_to_ID_db={}
for file in pazar_files:
if '.csv' in file:
name = string.join(string.split(file,'_')[1:-1],'_')
fn = filepath('BuildDBs/tftargets/'+file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
try:
### Each line contains the following 12 tab-delimited fields:
### <PAZAR TF ID> <Ensembl TF transcript> <TF Name> <PAZAR Gene ID> <Ensembl gene accession> <chromosome> <gene start coordinate> <gene end coordinate> <species> <project name> <PMID> <analysis method>
pazar_tf_id, ens_tf_transcript, tf_name, pazar_geneid, ens_gene, chr, gene_start,gene_end,species,project,pmid,analysis_method = string.split(data,'\t')
if ens_tf_transcript == 'ENSMUST00000105345' and 'Pluripotency' in project:
### This is a specific error (TCF3 corresponds to TCF7L1 but poor nomenclature resulted in a mis-annotation here)
ens_tf_transcript = 'ENSMUST00000069536'
genus,species_name = string.split(species,' ') ### e.g. 'Mus musculus'
species = genus[0]+species_name[0] ### -> 'Mm'
tft=TFTargetInfo(tf_name,ens_gene,project,pmid,analysis_method)
try: tf_to_target[species,tf_name].append(tft)
except Exception: tf_to_target[species,tf_name] = [tft]
species_db[species]=[]
tf_name_to_ID_db[tf_name] = ens_tf_transcript ### This is an Ensembl transcript ID -> convert to gene ID
except Exception:
None ### Occurs due to file formatting issues (during an update?)
if determine_tf_geneids == 'yes':
""" The below code is probably most useful for creation of complex regulatory inference networks in Cytoscape """
uniprot_ensembl_db = importUniProtAnnotations(species_db,force) ### Get UniProt IDs that often match Pazar TF names
transcript_to_gene_db={}
gene_to_symbol_db={}
#species_db={}
#species_db['Mm']=[]
for species in species_db:
try:
try: gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript')); #print mod_source, 'relationships imported.'
except Exception:
try:
downloadEnsemblTranscriptAssociations(species)
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
except Exception: gene_to_transcript_db={}
try: gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception:
### Download this
base_url = 'http://www.altanalyze.org/archiveDBs/'
url = base_url+species+'/Ensembl-Symbol.txt'
update.downloadSuppressPrintOuts(url,'AltDatabase/goelite/'+species+'/uid-gene/','')
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_transcript_db={}; gene_to_symbol={}
#print len(gene_to_transcript_db), species
for gene in gene_to_transcript_db:
for transcript in gene_to_transcript_db[gene]:
transcript_to_gene_db[transcript]=gene
for gene in gene_to_symbol:
gene_to_symbol_db[gene] = gene_to_symbol[gene]
missing=[]
for (species,tf_name) in tf_to_target:
original_tf_name = tf_name
try:
ens_tf_transcript = tf_name_to_ID_db[tf_name]
ens_gene = transcript_to_gene_db[ens_tf_transcript]
#if 'ENSMUST00000025271' == ens_tf_transcript: print ens_gene;kill
#print gene_to_symbol_db[ens_gene];sys.exit()
                symbol = string.lower(gene_to_symbol_db[ens_gene][0]) ### convert gene ID to lower case symbol ID
### Store the original TF name and Ensembl symbol (different species TFs can have different symbols - store all)
try: tf_to_target_symbol[original_tf_name].append(symbol)
except Exception: tf_to_target_symbol[original_tf_name] = [symbol]
except Exception:
try:
#ens_tf_transcript = tf_name_to_ID_db[tf_name]
#print species, tf_name, ens_tf_transcript;sys.exit()
ens_gene = uniprot_ensembl_db[species,tf_name]
                    symbol = string.lower(gene_to_symbol_db[ens_gene][0]) ### convert gene ID to lower case symbol ID
try: tf_to_target_symbol[original_tf_name].append(symbol)
except Exception: tf_to_target_symbol[original_tf_name] = [symbol]
except Exception:
try:
tf_name = string.split(tf_name,'_')[0]
ens_gene = uniprot_ensembl_db[species,tf_name]
                        symbol = string.lower(gene_to_symbol_db[ens_gene][0]) ### convert gene ID to lower case symbol ID
try: tf_to_target_symbol[original_tf_name].append(symbol)
except Exception: tf_to_target_symbol[original_tf_name] = [symbol]
except Exception:
try:
tf_names=[]
if '/' in tf_name:
tf_names = string.split(tf_name,'/')
elif ' ' in tf_name:
tf_names = string.split(tf_name,' ')
for tf_name in tf_names:
ens_gene = uniprot_ensembl_db[species,tf_name]
                                symbol = string.lower(gene_to_symbol_db[ens_gene][0]) ### convert gene ID to lower case symbol ID
try: tf_to_target_symbol[original_tf_name].append(symbol)
except Exception: tf_to_target_symbol[original_tf_name] = [symbol]
except Exception: missing.append((tf_name,species))
print 'Ensembl IDs found for transcript or UniProt Transcription factor names:',len(tf_to_target_symbol),'and missing:', len(missing)
#print missing[:20]
### Translate all species data to gene symbol to export for all species
species_tf_targets={}
for (species,tf_name) in tf_to_target:
try:
tf_db = species_tf_targets[species]
tf_db[tf_name] = tf_to_target[species,tf_name]
except Exception:
tf_db = {}
tf_db[tf_name] = tf_to_target[species,tf_name]
species_tf_targets[species] = tf_db
tf_dir = 'BuildDBs/tftargets/symbol/tf-target.txt'
tf_data = export.ExportFile(tf_dir)
tf_to_symbol={}
#print 'Exporting:',tf_dir
#print len(species_tf_targets)
for species in species_tf_targets:
try: gene_to_source_id = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_source_id={}
tf_db = species_tf_targets[species]
for tf_name in tf_db:
for tft in tf_db[tf_name]:
try:
for symbol in gene_to_source_id[tft.Ensembl()]:
symbol = string.lower(symbol)
tf_id = tf_name+'(Source:'+tft.Project()+'-PAZAR'+')'
tf_data.write(tf_id+'\t'+symbol+'\n')
try: tf_to_symbol[tf_id].append(symbol)
except Exception: tf_to_symbol[tf_id] = [symbol]
except Exception: null=[];
tf_data.close()
tf_to_symbol = gene_associations.eliminate_redundant_dict_values(tf_to_symbol)
return tf_to_symbol
def importPAZARcompiled():
""" Skips over the above function when these tf-target file is downlaoded directly """
tf_dir = 'BuildDBs/tftargets/symbol/tf-target.txt'
tf_to_symbol={}
fn = filepath(tf_dir)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
tf_id,symbol = string.split(data,'\t')
try: tf_to_symbol[tf_id].append(symbol)
except Exception: tf_to_symbol[tf_id] = [symbol]
tf_to_symbol = gene_associations.eliminate_redundant_dict_values(tf_to_symbol)
return tf_to_symbol
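### Sketch of the pre-compiled tab-delimited file this function expects (hypothetical
### rows; the TF identifier format matches what importPAZARAssociations() exports):
#
#   Foxa2(Source:Liver-PAZAR)<tab>afp
#   Foxa2(Source:Liver-PAZAR)<tab>alb
#
### producing tf_to_symbol = {'Foxa2(Source:Liver-PAZAR)': ['afp','alb']}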
def importPhenotypeOntologyGeneAssociations():
x=0
pheno_symbol={}; phen=[]
fn = filepath('BuildDBs/Pheno/HMD_HumanPhenotype.rpt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
hs_symbol=t[0]; hs_entrez=t[1]; mm_symbol=t[2]; mgi=t[3]; pheno_ids=t[4]
if 'MP' not in pheno_ids:
try: pheno_ids = t[5]
except Exception: pass
hs_symbol = string.lower(hs_symbol)
mm_symbol = string.lower(mm_symbol)
symbols = [mm_symbol,hs_symbol]
pheno_ids = string.split(pheno_ids,' '); phen+=pheno_ids
for pheno_id in pheno_ids:
if len(pheno_id)>0:
for symbol in symbols:
try: pheno_symbol[pheno_id].append(symbol)
except Exception: pheno_symbol[pheno_id]=[symbol]
phen = unique.unique(phen)
pheno_symbol = gene_associations.eliminate_redundant_dict_values(pheno_symbol)
return pheno_symbol
def importAmandeusPredictions(force):
if force == 'yes':
downloadAmadeusPredictions()
x=0
tf_symbol_db={}
fn = filepath('BuildDBs/Amadeus/symbol-Metazoan-Amadeus.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
symbol,system,tf_name = string.split(data,'\t')
symbol = string.lower(symbol)
if tf_name == 'Oct4': tf_name = 'Pou5f1' ### Known annotation issue
try: tf_symbol_db[tf_name].append(symbol)
except Exception: tf_symbol_db[tf_name]=[symbol]
tf_symbol_db = gene_associations.eliminate_redundant_dict_values(tf_symbol_db)
if determine_tf_geneids == 'yes':
""" ### Since this data is species independent (not indicated in the current file) can't apply this yet
uniprot_ensembl_db = importUniProtAnnotations(species_db,force)
try: gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_symbol={}
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
symbol_to_gene = lowerSymbolDB(symbol_to_gene)
uniprot_ensembl_db = lowerSymbolDB(uniprot_ensembl_db)
"""
### Build a TFname to Ensembl gene symbol database for Amadeus
for tf_name in tf_symbol_db:
symbol = string.lower(string.split(tf_name,'(')[0])
            if 'miR' not in tf_name and 'let' not in tf_name: ### Exclude miRNAs
try: tf_to_target_symbol[tf_name].append(symbol)
except Exception: tf_to_target_symbol[tf_name] = [symbol]
return tf_symbol_db
def importDiseaseOntologyGeneAssocations():
disease_ontology_files = unique.read_directory('/BuildDBs/Disease')
symbol_to_DO={}
for file in disease_ontology_files:
if '_do' in file:
fn = filepath('BuildDBs/Disease/'+file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(t)>1:
symbol=string.lower(t[2]); doid = t[4]
try: symbol_to_DO[doid].append(symbol)
except Exception: symbol_to_DO[doid]=[symbol]
return symbol_to_DO
def exportSymbolRelationships(pathway_to_symbol,selected_species,pathway_type,type):
if selected_species != None: ### Restrict to selected species only
current_species_dirs=selected_species
else:
current_species_dirs = unique.read_directory('/'+database_dir)
for species in current_species_dirs:
if '.' not in species:
ens_dir = database_dir+'/'+species+'/gene-'+type+'/Ensembl-'+pathway_type+'.txt'
ens_data = export.ExportFile(ens_dir)
try:
if determine_tf_geneids == 'yes':
### When TF data is exported and TF gene-gene interactions are defined, export them
tf_network_dir = 'BuildDBs/TF_Interactions/'+species+'/interactions.txt'
tf_network_data = export.ExportFile(tf_network_dir)
tf_network_data.write('Symbol1\tInteractionType\tSymbol2\tGeneID1\tGeneID2\tSource\n')
interaction = 'transcriptional_target'
print 'Exporting TF-Target gene-gene relationships to',tf_network_dir
tf_count=0; unique_interactions={}
try:
gene_chr_db = gene_associations.getGeneToUid(species,('hide','Ensembl-chr'))
except Exception:
try:
ensembl_version = unique.getCurrentGeneDatabaseVersion()
from build_scripts import EnsemblSQL
EnsemblSQL.getChrGeneOnly(species,'Basic',ensembl_version,'yes')
gene_chr_db = gene_associations.getGeneToUid(species,('hide','Ensembl-chr'))
except Exception: gene_chr_db={}
except Exception: None
if 'mapp' in type: ens_data.write('GeneID\tSystem\tGeneSet\n')
else: ens_data.write('GeneID\tGeneSet\n')
try: ens_to_entrez = gene_associations.getGeneToUid(species,('hide','Ensembl-EntrezGene'))
except Exception: ens_to_entrez ={}
if len(ens_to_entrez)>0:
entrez_dir = database_dir+'/'+species+'/gene-'+type+'/EntrezGene-'+pathway_type+'.txt'
entrez_data = export.ExportFile(entrez_dir)
if 'mapp' in type: entrez_data.write('GeneID\tSystem\tGeneSet\n')
else: entrez_data.write('GeneID\tGeneSet\n')
#print 'Exporting '+pathway_type+' databases for:',species
try: gene_to_source_id = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_source_id={}
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
source_to_gene = lowerSymbolDB(source_to_gene)
for pathway in pathway_to_symbol:
for symbol in pathway_to_symbol[pathway]:
try:
genes = source_to_gene[symbol]
for gene in genes:
if len(genes)<5: ### don't propagate redundant associations
if 'mapp' in type: ens_data.write(gene+'\tEn\t'+pathway+'\n')
else: ens_data.write(gene+'\t'+pathway+'\n')
if gene in ens_to_entrez:
for entrez in ens_to_entrez[gene]:
if 'mapp' in type: entrez_data.write(entrez+'\tL\t'+pathway+'\n')
else: entrez_data.write(entrez+'\t'+pathway+'\n')
try:
if determine_tf_geneids == 'yes':
if '(' in pathway:
source_name = string.split(pathway,'(')[0]
else:
source_name = pathway
proceed = True
if gene in gene_chr_db:
if len(gene_chr_db[gene][0])>2: ### not a valid chromosome (e.g., HSCHR6_MHC_COX)
proceed = False
                                            if proceed == True or proceed == False: ### always True; the chromosome validity check above is effectively disabled
try: symbols = tf_to_target_symbol[source_name]
except Exception: symbols = tf_to_target_symbol[pathway]
for symbol in symbols:
tf_gene = source_to_gene[symbol][0] ### meta-species converted symbol -> species Ensembl gene
tf_symbol = gene_to_source_id[tf_gene][0] ### species formatted symbol for TF
symbol = gene_to_source_id[gene][0] ### species formatted symbol for target
if (tf_symbol,symbol,pathway) not in unique_interactions:
tf_network_data.write(string.join([tf_symbol,interaction,symbol,tf_gene,gene,pathway],'\t')+'\n')
unique_interactions[tf_symbol,symbol,pathway]=[]
try: merged_tf_interactions[tf_symbol].append(string.lower(symbol))
except Exception: merged_tf_interactions[tf_symbol] = [string.lower(symbol)]
tf_count+=1
except Exception: None
except Exception: null=[]
ens_data.close()
try: entrez_data.close()
except Exception: null=[]
try:
if determine_tf_geneids == 'yes':
tf_network_data.close()
print tf_count,'TF to target interactions exported..'
except Exception: None
def translateBioMarkersBetweenSpecies(input_dir,species):
    ### Convert the source-species Ensembl primary key IDs to the target species' Ensembl IDs (matched via lower-case gene symbols)
try:
biomarker_files = unique.read_directory(input_dir)
except Exception:
biomarker_files = unique.read_directory(input_dir)
try: gene_to_source_id = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_source_id={}
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
source_to_gene = lowerSymbolDB(source_to_gene)
print len(source_to_gene)
marker_symbol_db={}
for file in biomarker_files:
if 'tissue-specific' in file and species not in file: ### Don't re-translate an already translated file
export_dir = 'AltDatabase/ensembl/'+species+'/'+species+file[2:]
export_data = export.ExportFile(export_dir)
fn = filepath(input_dir+'/'+file)
x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
x = 1; y=0
for i in t:
if 'Symbol' in i: sy = y
y+=1
export_data.write(line)
else:
ensembl = t[0]; symbol = string.lower(t[sy])
#print symbol, len(source_to_gene);sys.exit()
if symbol in source_to_gene:
species_gene = source_to_gene[symbol][0] ### should only be one gene per symbol (hopefully)
values = string.join([species_gene]+t[1:],'\t')+'\n'
export_data.write(values)
export_data.close()
print export_dir,'written...'
def extractKEGGAssociations(species,mod,system_codes):
import_dir = filepath('/BuildDBs/KEGG')
g = gene_associations.GrabFiles(); g.setdirectory(import_dir)
filedir = g.getMatchingFolders(species)
gpml_data,pathway_db = gene_associations.parseGPML(filepath(filedir))
gene_to_WP = gene_associations.unifyGeneSystems(gpml_data,species,mod)
gene_associations.exportCustomPathwayMappings(gene_to_WP,mod,system_codes,filepath(database_dir+'/'+species+'/gene-mapp/'+mod+'-KEGG.txt'))
if len(gene_to_WP)>0 and mod == 'Ensembl': ### Export all pathway interactions
try: gene_associations.exportNodeInteractions(pathway_db,mod,filepath(filedir))
except Exception: null=[]
elif len(gene_to_WP)>0 and mod == 'HMDB': ### Export all pathway interactions
try: gene_associations.exportNodeInteractions(pathway_db,mod,filepath(filedir),appendToFile=True)
except Exception: null=[]
return filedir
def extractGMTAssociations(species,mod,system_codes,data_type,customImportFile=None):
if mod != 'HMDB':
if customImportFile != None:
import_dir = customImportFile
else:
import_dir = filepath('/BuildDBs/'+data_type)
gmt_data = gene_associations.parseGMT(import_dir)
gene_to_custom = gene_associations.unifyGeneSystems(gmt_data,species,mod)
gene_associations.exportCustomPathwayMappings(gene_to_custom,mod,system_codes,filepath(database_dir+'/'+species+'/gene-mapp/'+mod+'-'+data_type+'.txt'))
def transferGOSlimGeneAssociations(selected_species):
if selected_species != None: ### Restrict to selected species only
current_species_dirs=selected_species
else:
current_species_dirs = unique.read_directory('/'+database_dir)
for species_code in current_species_dirs:
try:
ens_go_file_dir = filepath(database_dir+'/'+species_code+'/gene-go/Ensembl-GOSlim.txt')
goslim_ens_file = filepath(database_dir+'/'+species_code+'/uid-gene/Ensembl-goslim_goa.txt')
export.copyFile(goslim_ens_file,ens_go_file_dir)
translateToEntrezGene(species_code,ens_go_file_dir)
except Exception: null=[]
def translateToEntrezGene(species,filename):
x=0; type = 'pathway'
try: ens_to_entrez = gene_associations.getGeneToUid(species,('hide','Ensembl-EntrezGene'))
except Exception: ens_to_entrez ={}
if len(ens_to_entrez)>0:
export_file = string.replace(filename,'Ensembl','EntrezGene')
export_data = export.ExportFile(export_file)
export_data.write('EntrezGene\tOntologyID\n')
fn = filepath(filename)
for line in open(fn,'rU').xreadlines():
if x==0: x=1
else:
data = cleanUpLine(line)
try:
ensembl,pathway = string.split(data,'\t')
type = 'ontology'
except Exception:
ensembl,null,pathway = string.split(data,'\t')
try:
entrezs = ens_to_entrez[ensembl]
for entrez in entrezs:
if type == 'ontology':
export_data.write(entrez+'\t'+pathway+'\n')
else:
export_data.write(entrez+'\tEn\t'+pathway+'\n')
except Exception:
null=[]
export_data.close()
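### Input format note for translateToEntrezGene(): two-column lines (Ensembl<tab>OntologyID)
### are detected as 'ontology' and exported as EntrezGene<tab>OntologyID; three-column lines
### (Ensembl<tab>En<tab>pathway) fall through to the 'pathway' branch and are exported with
### the 'En' system code preserved.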
def importRVistaGeneAssociations(species_code,source_path):
x=0; tf_symbol_db={}
fn = filepath(source_path)
TF_symbol_db={}
    if species_code == 'Dm' or species_code == 'Mm': ### Dm and Mm source IDs are Ensembl gene IDs
gene_to_symbol = gene_associations.getGeneToUid(species_code,('hide','Ensembl-Symbol'))
increment = 10000
    print 'Importing symbol-R Vista TF associations (be patient)'
x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
x+=1
#if x==increment: x=0; print '*',
try:
symbol,TF = string.split(data,'\t')
if species_code == 'Dm' or species_code == 'Mm':
ensembls = [symbol]
try: symbol = gene_to_symbol[symbol][0]
                except Exception: forceError ### undefined name raises NameError so the outer except skips this line
try: TF_symbol_db[TF].append(string.lower(symbol))
except Exception: TF_symbol_db[TF]=[string.lower(symbol)]
except Exception: None
TF_symbol_db = gene_associations.eliminate_redundant_dict_values(TF_symbol_db)
return TF_symbol_db
def importMiRGeneAssociations(species_code,source_path):
try:
destination_path = filepath(database_dir+'/'+species_code+'/gene-mapp/Ensembl-microRNATargets.txt')
export.copyFile(source_path,destination_path)
translateToEntrezGene(species_code,destination_path)
except Exception: null=[]
def importBioMarkerGeneAssociations(input_dir):
try:
biomarker_folder = unique.read_directory(input_dir)
except Exception:
biomarker_folder = unique.read_directory(input_dir)
marker_symbol_db={}
for folder in biomarker_folder:
if '.' in folder: continue ### This is a file not a folder
else: biomarker_files = unique.read_directory(input_dir+'/'+folder)
for file in biomarker_files:
x=0
if '.txt' in file and '-correlation' not in file and 'AltExon' not in file:
fn = filepath('BuildDBs/BioMarkers/'+folder+'/'+file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
x = 1; y=0
for i in t:
if 'marker-in' in i: mi = y
if 'Symbol' in i: sy = y
y+=1
ensembl = t[0]; symbol = string.lower(t[sy]); marker = t[mi]
markers = string.split(marker,'|')
for marker in markers:
try: marker_symbol_db[marker].append(symbol)
except Exception: marker_symbol_db[marker]=[symbol]
marker_symbol_db = gene_associations.eliminate_redundant_dict_values(marker_symbol_db)
return marker_symbol_db
def importDomainGeneAssociations(species_code,source_path):
try:
destination_path = filepath(database_dir+'/'+species_code+'/gene-mapp/Ensembl-Domains.txt')
export.copyFile(source_path,destination_path)
translateToEntrezGene(species_code,destination_path)
except Exception: null=[]
def importBioGRIDGeneAssociations(taxid,species):
model_mammal_tax = {}
model_mammal_tax['9606'] = 'Hs'
model_mammal_tax['10090'] = 'Mm'
model_mammal_tax['10116'] = 'Rn'
filtered=considerOnlyMammalian([species]) ### See if the species is a mammal
    if len(filtered)==0: model_mammal_tax={} ### Don't include the gold standard mammals if not a mammal
model_mammal_tax[taxid]=species
biogrid_files = unique.read_directory('/BuildDBs/BioGRID')
latest_file = biogrid_files[-1]
fn = filepath('BuildDBs/BioGRID/'+latest_file)
ens_dir = database_dir+'/'+species+'/gene-interactions/Ensembl-BioGRID.txt'
ens_data = export.ExportFile(ens_dir)
ens_data.write('Symbol1\tInteractionType\tSymbol2\tGeneID1\tGeneID2\tSource\n')
try: gene_to_source_id = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_source_id={}
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
source_to_gene = lowerSymbolDB(source_to_gene)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
species_tax = t[15]
if species_tax in model_mammal_tax:
symbol1 = t[7]; symbol2 = t[8]
source_exp = t[11]; interaction_type = t[12]
try:
ens1 = source_to_gene[string.lower(symbol1)][0]
ens2 = source_to_gene[string.lower(symbol2)][0]
values = string.join([symbol1,interaction_type,symbol2,ens1,ens2,source_exp],'\t')+'\n'
ens_data.write(values)
except Exception:
None
def importDrugBankAssociations(species):
fn = filepath('BuildDBs/DrugBank/drugbank.txt')
ens_dir = database_dir+'/'+species+'/gene-interactions/Ensembl-DrugBank.txt'
ens_data = export.ExportFile(ens_dir)
ens_data.write('DrugName\tMechanism\tGeneSymbol\tDrugBankDB-ID\tGeneID\tSource\n')
try: gene_to_source_id = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception: gene_to_source_id={}
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
source_to_gene = lowerSymbolDB(source_to_gene)
getCAS=False
getGenericName=False
getMechanim = False
getGeneName = False
geneNames=[]
mechanism=''
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
        if data == '# Primary_Accession_No:': getCAS=True ### switched this from CAS to the DrugBank ID for consistency and naming simplicity
elif getCAS: casID = data; getCAS=False
if data == '# Generic_Name:': getGenericName=True
elif getGenericName: genericName = data; getGenericName=False
if data == '# Mechanism_Of_Action:': getMechanim=True
elif getMechanim:
if len(data)>0: mechanism += data + ' '
else: getMechanim=False
if '# Drug_Target_' in data and '_Gene_Name' in data: getGeneName=True
elif getGeneName: geneNames.append(data); getGeneName=False
if '#END_DRUGCARD' in data:
for symbol in geneNames:
try:
ens = source_to_gene[string.lower(symbol)][0]
values = string.join([genericName,mechanism,symbol,casID,ens,'DrugBank'],'\t')+'\n'
ens_data.write(values)
except Exception:
None
casID=''; genericName=''; mechanism=''; geneNames=[]
############# Central build functions #############
def importWikiPathways(selected_species,force):
if selected_species == None:
selected_species = unique.read_directory('/'+database_dir)
importSpeciesData()
getSourceData()
all_species = 'no'
if force == 'yes':
try:
gene_associations.convertAllGPML(selected_species,all_species) ### Downloads GPMLs and builds flat files
for species_code in selected_species:
interaction_file = 'GPML/Interactomes/interactions.txt'
moveInteractionsToInteractionsDir(interaction_file,species_code,'WikiPathways')
status = 'built'
except IOError:
print 'Unable to connect to http://www.wikipathways.org'
status = 'failed'
        status = 'built' ### forces the downstream integration to run even if the download failed
if status == 'built':
from import_scripts import BuildAffymetrixAssociations
for species_code in selected_species:
species_name = species_names[species_code]
if status == 'built':
relationship_types = ['native','mapped']
for relationship_type in relationship_types:
#print 'Processing',relationship_type,'relationships'
index=0
integrate_affy_associations = 'no'
incorporate_previous = 'yes'
process_affygo = 'no'
counts = BuildAffymetrixAssociations.importWikipathways(source_types,incorporate_previous,process_affygo,species_name,species_code,integrate_affy_associations,relationship_type,'over-write previous')
index+=1
print 'Finished integrating updated WikiPathways'
def moveInteractionsToInteractionsDir(source_file,species,name):
destination_file = filepath('AltDatabase/goelite/'+species+'/gene-interactions/Ensembl-'+name+'.txt')
source_file = filepath(source_file)
try: export.copyFile(source_file,destination_file)
except Exception: None ### No file to move
def importKEGGAssociations(selected_species,force):
supported_databases = ['Ag','At','Ce','Dm','Dr','Hs','Mm','Os','Rn']
getSourceData()
if selected_species != None: ### Restrict by selected species
supported_databases2=[]
for species in selected_species:
if species in supported_databases:
supported_databases2.append(species)
supported_databases = supported_databases2
mod_types_list=[]
for i in mod_types: mod_types_list.append(i)
mod_types_list.sort()
for species in supported_databases:
if force == 'yes':
downloadKEGGPathways(species)
for mod in mod_types_list:
buildDB_dir = extractKEGGAssociations(species,mod,system_codes)
interaction_file = buildDB_dir+'/Interactomes/interactions.txt'
moveInteractionsToInteractionsDir(interaction_file,species,'KEGG')
def importPathwayCommons(selected_species,force):
original_species = selected_species
selected_species = considerOnlyMammalian(selected_species)
if len(selected_species) == 0:
print 'PLEASE NOTE: %s does not support PathwayCommons update.' % string.join(original_species,',')
else:
if force == 'yes':
downloadPathwayCommons()
getSourceData()
for species in selected_species:
for mod in mod_types:
extractGMTAssociations(species,mod,system_codes,'PathwayCommons')
def importTranscriptionTargetAssociations(selected_species,force):
original_species = selected_species
selected_species = considerOnlyMammalian(selected_species)
x=[]
if len(selected_species) == 0:
print 'PLEASE NOTE: %s does not support Transcription Factor association update.' % string.join(original_species,',')
else:
global determine_tf_geneids
global tf_to_target_symbol ### Used for TF-target interaction networks
global merged_tf_interactions
tf_to_target_symbol={}
source = 'raw'#'precompiled'
merged_tf_interactions={} ### Stores the final merged PAZAR-Amadeus merged data
determine_tf_geneids = 'yes'
### No need to specify a species since the database will be added only to currently installed species
if force == 'yes':
source = downloadPAZARAssocations()
if source == 'raw':
x = importPAZARAssociations(force) ### builds the PAZAR TF-symbol associations from resource.csv files
if source == 'precompiled' or len(x)==0:
x = importPAZARcompiled() ### imports from pre-compiled/downloaded TF-symbol associations
y = importAmandeusPredictions(force)
z = dict(x.items() + y.items())
geneset = 'TFTargets'
if determine_tf_geneids == 'yes':
tf_to_target_symbol = gene_associations.eliminate_redundant_dict_values(tf_to_target_symbol)
exportSymbolRelationships(z,selected_species,geneset,'mapp')
determine_tf_geneids = 'no'
geneset = 'MergedTFTargets'
z = merged_tf_interactions
exportSymbolRelationships(merged_tf_interactions,selected_species,geneset,'mapp')
for species in selected_species:
interaction_file = 'BuildDBs/TF_Interactions/'+species+'/interactions.txt'
moveInteractionsToInteractionsDir(interaction_file,species,'TFTargets')
def importPhenotypeOntologyData(selected_species,force):
original_species = selected_species
selected_species = considerOnlyMammalian(selected_species)
if len(selected_species) == 0:
print 'PLEASE NOTE: %s does not support Phenotype Ontology update.' % string.join(original_species,',')
else:
### No need to specify a species since the database will be added only to currently installed species
if force == 'yes':
downloadPhenotypeOntologyOBO()
downloadPhenotypeOntologyGeneAssociations()
x = importPhenotypeOntologyGeneAssociations()
exportSymbolRelationships(x,selected_species,'MPhenoOntology','go')
def importDiseaseOntologyAssociations(selected_species,force):
original_species = selected_species
selected_species = considerOnlyMammalian(selected_species)
if len(selected_species) == 0:
print 'PLEASE NOTE: %s does not support Disease Ontology update.' % string.join(original_species,',')
else:
if force == 'yes':
downloadDiseaseOntologyOBO()
downloadDiseaseOntologyGeneAssociations(selected_species)
x = importDiseaseOntologyGeneAssocations()
exportSymbolRelationships(x,selected_species,'CTDOntology','go')
def importGOSlimAssociations(selected_species,force):
if force == 'yes':
downloadGOSlimOBO()
transferGOSlimGeneAssociations(selected_species)
def importRVistaAssocations(selected_species,force):
supported_databases = ['Hs','Mm','Dm']
selected_supported_databases=[]
for species in selected_species:
if species in supported_databases:
selected_supported_databases.append(species)
missing_Rvista_associations=[]
found_Rvista_associations=[]
for species in selected_supported_databases:
if force == 'yes':
try:
fn = downloadRvistaDatabases(species)
found_Rvista_associations.append((species,fn))
except Exception:
missing_Rvista_associations.append(species)
else:
fn = filepath('BuildDBs/RVista/'+species+'_RVista_factors.txt')
found_Rvista_associations.append((species,fn))
for (species,fn) in found_Rvista_associations:
TF_symbol_db = importRVistaGeneAssociations(species,fn)
exportSymbolRelationships(TF_symbol_db,[species],'RVista_TFsites','mapp')
def importMiRAssociations(selected_species,force):
supported_databases = unique.read_directory('/'+database_dir)
if selected_species != None: ### Restrict by selected species
supported_databases=selected_species
missing_miR_associations=[]
found_miR_associations=[]
for species in supported_databases:
if force == 'yes':
try:
fn = downloadMiRDatabases(species)
found_miR_associations.append((species,fn))
except Exception:
missing_miR_associations.append(species)
for (species,fn) in found_miR_associations:
importMiRGeneAssociations(species,fn)
interaction_file = 'AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-microRNATargets.txt'
moveInteractionsToInteractionsDir(interaction_file,species,'microRNATargets')
def importBioMarkerAssociations(selected_species,force):
original_species = selected_species
selected_species = considerOnlyMammalian(selected_species)
if len(selected_species) == 0:
print 'PLEASE NOTE: %s does not support BioMarker association update.' % string.join(original_species,',')
else:
if force == 'yes':
downloadBioMarkers('BuildDBs/BioMarkers/')
x = importBioMarkerGeneAssociations('BuildDBs/BioMarkers')
exportSymbolRelationships(x,selected_species,'BioMarkers','mapp')
def importDrugBank(selected_species,force):
if force == 'yes':
downloadDrugBankAssociations()
for species in selected_species:
importDrugBankAssociations(species)
def importBioGRID(selected_species,force):
if force == 'yes':
downloadBioGRIDAssociations()
for species in selected_species:
importSpeciesData() ### Creates the global species_taxids
if species in species_taxids:
taxid = species_taxids[species]
importBioGRIDGeneAssociations(taxid,species)
def importDomainAssociations(selected_species,force):
if force == 'yes':
paths = downloadDomainAssociations(selected_species)
for (species,path) in paths:
path = string.replace(path,'.gz','.txt')
importDomainGeneAssociations(species, path)
def considerOnlyMammalian(selected_species):
supported_mammals = ['Am','Bt', 'Cf', 'Ch', 'Cj', 'Cp', 'Do', 'Ec', 'Ee', 'Et', 'Fc', 'Gg', 'Go', 'Hs',
'La', 'Ma', 'Md', 'Me', 'Mi', 'Ml', 'Mm', 'Oa', 'Oc','Og', 'Op', 'Pc', 'Pp',
'Pt', 'Pv', 'Rn', 'Sa', 'Ss', 'St', 'Tb', 'Tn', 'Tr', 'Ts', 'Tt', 'Vp']
filtered_species=[]
if selected_species == None:
selected_species = unique.read_directory('/'+database_dir)
for i in selected_species:
if i in supported_mammals:
filtered_species.append(i)
return filtered_species
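### Usage sketch: non-mammalian species codes are filtered out, e.g.:
#
#   considerOnlyMammalian(['Hs','Mm','Dm']) ### -> ['Hs','Mm'] (Dm is not in the mammal whitelist)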
def buildInferrenceTables(selected_species):
for species_code in selected_species:
file_found = verifyFile(database_dir+'/'+species_code+'/uid-gene/Ensembl-Symbol'+'.txt') ### If file is present, the below is not needed
if file_found == 'no':
try: gene_associations.swapAndExportSystems(species_code,'Ensembl','EntrezGene') ### Allows for analysis of Ensembl IDs with EntrezGene based GO annotations (which can vary from Ensembl)
except Exception: null=[] ### Occurs if EntrezGene not supported
### Build out these symbol association files
try: gene_associations.importGeneData(species_code,('export','Ensembl'))
except Exception: null=[] ### Occurs if EntrezGene not supported
try: gene_associations.importGeneData(species_code,('export','EntrezGene'))
except Exception: null=[] ### Occurs if EntrezGene not supported
def exportBioTypes(selected_species):
for species in selected_species:
eo = export.ExportFile('AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-Biotypes.txt')
eo.write('Ensembl\tSystemCode\tClass\n')
filename = 'AltDatabase/uniprot/'+species+'/custom_annotations.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ens_gene,compartment,custom_class = t[:3]
if 'GPCR' in custom_class:
custom_class = ['GPCR']
else:
custom_class = string.split(custom_class,'|')
custom_class = string.split(compartment,'|')+custom_class
for c in custom_class:
if len(c)>0:
eo.write(ens_gene+'\t\t'+c+'\n')
eo.close()
def buildAccessoryPathwayDatabases(selected_species,additional_resources,force):
global database_dir
global program_dir
global program_type
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze':
program_dir = database_dir
else:
program_dir=''
buildInferrenceTables(selected_species) ### Make sure these tables are present first!!!
print 'Attempting to update:', string.join(additional_resources,',')
if 'Latest WikiPathways' in additional_resources:
try: importWikiPathways(selected_species,force)
except Exception:
#print traceback.format_exc()
print 'WikiPathways import failed (cause unknown)'
if 'KEGG' in additional_resources:
try:
importKEGGAssociations(selected_species,force)
#print traceback.format_exc()
except Exception:
print 'KEGG import failed (cause unknown)'
if 'Transcription Factor Targets' in additional_resources:
try: importTranscriptionTargetAssociations(selected_species,force)
except Exception:
#print traceback.format_exc()
print 'Transcription Factor Targets import failed (cause unknown)'
if 'Phenotype Ontology' in additional_resources:
try: importPhenotypeOntologyData(selected_species,force)
except Exception: print 'Phenotype Ontology import failed (cause unknown)'
if 'Disease Ontology' in additional_resources:
try: importDiseaseOntologyAssociations(selected_species,force)
except Exception: print 'Disease Ontology import failed (cause unknown)'
if 'GOSlim' in additional_resources:
try: importGOSlimAssociations(selected_species,force)
except Exception: print 'GOSlim import failed (cause unknown)'
if 'miRNA Targets' in additional_resources:
try: importMiRAssociations(selected_species,force)
except Exception: print 'miRNA Targets import failed (cause unknown)'
if 'BioMarkers' in additional_resources:
try: importBioMarkerAssociations(selected_species,force)
except Exception:
print 'BioMarkers import failed (cause unknown)'#,traceback.format_exc()
    if 'Domains2' in additional_resources: ### Currently disabled since its utility is likely low but analysis time is long
try: importDomainAssociations(selected_species,force)
except Exception: print 'Domains import failed (cause unknown)'
if 'PathwayCommons' in additional_resources:
try:
importPathwayCommons(selected_species,force)
except Exception:
#print traceback.format_exc()
print 'PathwayCommons import failed (cause unknown)'
if 'RVista Transcription Factor Sites' in additional_resources:
try: importRVistaAssocations(selected_species,force)
except Exception: print 'R Vista Transcription Factor Site import failed (cause unknown)'
if 'BioGRID' in additional_resources:
try: importBioGRID(selected_species,force)
except Exception:
#print traceback.format_exc()
print 'BioGRID import failed (cause unknown)'
if 'DrugBank' in additional_resources:
try: importDrugBank(selected_species,force)
except Exception: print 'Drug Bank import failed (cause unknown)'
try: exportBioTypes(selected_species)
except Exception: pass
if __name__ == '__main__':
selected_species = ['Mm']
force = 'yes'
download_species = 'Hs'
species = 'Hs'
#species = 'Mm'
mod = 'Ensembl'
#"""
getSourceData()
program_type,database_dir = unique.whatProgramIsThis()
extractGMTAssociations(species,mod,system_codes,'KidneyMouse',customImportFile='/Users/saljh8/Dropbox/scRNA-Seq Markers/Mouse/Markers/Kidney/gmt');sys.exit()
#"""
#exportBioTypes(selected_species);sys.exit()
additional_resources=['Latest WikiPathways']#'KEGG','BioGRID','DrugBank','miRNA Targets','Transcription Factor Targets']
#translateBioMarkersBetweenSpecies('AltDatabase/ensembl/'+download_species,species);sys.exit()
additional_resources=['Latest WikiPathways','PathwayCommons','Transcription Factor Targets','Domains','BioMarkers']
additional_resources+=['miRNA Targets','GOSlim','Disease Ontology','Phenotype Ontology','KEGG','RVista Transcription Factor Sites']
additional_resources=['Latest WikiPathways']
buildAccessoryPathwayDatabases(selected_species,additional_resources,force)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/GeneSetDownloader.py
|
GeneSetDownloader.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
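### Usage sketch: strips newline/carriage-return characters and double quotes, e.g.:
#
#   cleanUpLine('ENSG00000139618\t"BRCA2"\r\n') ### -> 'ENSG00000139618\tBRCA2'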
def import_refseq(filename):
fn=filepath(filename)
refseq_mRNA_db = {}
seq_begin = 0
y = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if data[0:5] == 'LOCUS':
y += 1
cds = ''
seq = ''
ac = data[12:]
ac_ls= string.split(ac,' ')
ac = ac_ls[0]
elif data[5:8] == 'CDS':
cds = cds + data[21:]
try:
cds_start,cds_end = string.split(cds,'..')
except ValueError:
if cds[0:4] == 'join':
cds,second_cds = string.split(cds[5:],',')
cds_start,cds_end = string.split(cds,'..')
cds = int(cds_start),int(cds_end)
elif seq_begin == 1:
seq_temp = data[10:]
seq_temp = string.split(seq_temp,' ')
for sequence in seq_temp:
seq = seq + sequence
elif data[0:6] == 'ORIGIN':
seq_begin = 1
if data[0:2] == '//': #new entry
if len(seq) > 0 and len(cds)>0 and len(ac)>0:
#refseq_mRNA_db[ac] = cds,seq
if grab_sequence == "all":
retrieved_seq = seq
elif grab_sequence == "3UTR":
retrieved_seq = seq[cds[1]:]
elif grab_sequence == "5UTR":
                    retrieved_seq = seq[0:cds[0]-1] ### bases before the 1-based CDS start
elif grab_sequence == "first_last_coding_30":
retrieved_seq = (seq[cds[0]-1:cds[0]-1+30],seq[cds[1]-30:cds[1]])
if len(retrieved_seq) > 0:
refseq_mRNA_db[ac] = retrieved_seq
ac = ''
cds = ''
seq = ''
seq_begin = 0
print "Number of imported refseq entries:", len(refseq_mRNA_db),'out of',y
fasta_data = 'refseq_mRNA_converted_'+grab_sequence+'.txt'
fn=filepath(fasta_data)
data = open(fn,'w')
for ac in refseq_mRNA_db:
retrieved_seq = refseq_mRNA_db[ac]
if len(retrieved_seq[0])>0: ###Thus if there are two sequences stored as a tuple or list
if grab_sequence == "first_last_coding_30":
retrieved_seq = retrieved_seq[0] +'\t'+ retrieved_seq[1]
info = ac +'\t'+ str(len(retrieved_seq)) +'\t'+ retrieved_seq +'\n'
data.write(info)
data.close()
print fasta_data, 'written'
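### Coordinate arithmetic sketch (hypothetical cds=(10,300); added for illustration):
### GenBank CDS positions are 1-based and inclusive, Python slices 0-based and half-open, so:
#
#   seq[0:9]     ### 5'UTR   -> seq[0:cds[0]-1]
#   seq[300:]    ### 3'UTR   -> seq[cds[1]:]
#   seq[9:39]    ### first 30 coding bases -> seq[cds[0]-1:cds[0]-1+30]
#   seq[270:300] ### last 30 coding bases  -> seq[cds[1]-30:cds[1]]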
if __name__ == '__main__':
#grab_sequence = "5UTR"
#grab_sequence = "3UTR"
grab_sequence = "all"
#grab_sequence = "first_last_coding_30"
input_file = 'mouse.rna.gbff'
#input_file = 'human.rna.gbff'
import_refseq(input_file)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/RefSeqParser.py
|
RefSeqParser.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from stats_scripts import statistics
import math
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
################# Parse and Analyze Files
def getConstitutiveProbesets(filename):
"""This function assumes that the probesets used by Affymetrix to calculate gene expression
represent constitutive features. Examination of these annotations reveal discrepencies in this
assumption (some constitutive probesets not used and others used instead). Note: The previous statement
is not correct since it assumes core means constitutive, which is not the case.
"""
fn=filepath(filename)
    constitutive={}; x = 0
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n')
data = string.replace(data,'"','')
if data[0] != '#' and 'probeset' not in data:
probeset_id, transcript_cluster_id, probeset_list, probe_count = string.split(data,'\t')
try: probeset_list = string.split(probeset_list,' ')
except ValueError: probeset_list = [probeset_list]
            constitutive[transcript_cluster_id] = probeset_list
x += len(probeset_list)
print "Constitutive transcript clusters:",len(constutive),"and probesets:",x
return constutive
def getTranscriptAnnotation(filename,Species,test_status,test_clusterid):
"""This function gathers transcript_cluster annotations. Since Affymetrix transcript_clusters appear to
often cover more than one transcipt or have inconsistent mappings with other resources be wary. """
global species; species = Species; global test; global test_cluster; test = test_status; test_cluster=test_clusterid
fn=filepath(filename);trans_annotation_db={};trans_annot_extended={}
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n')
if data[0] != '#' and 'seqname' not in data:
tab_data = string.split(data[1:-1],'","'); tab_data2=[]; transcript_id = tab_data[0]
try: strand = tab_data[3]
except IndexError:
continue ### Problems in the source file exist if this happens... mis-placed \n, skip line
continue_analysis = 'no'
if test=='yes': ###used to test the program for a single gene
if transcript_id in test_cluster: continue_analysis='yes'
else: continue_analysis='yes'
if continue_analysis=='yes':
gene_annot = tab_data[7]; gene_annot2 = tab_data[8]
uniprot = tab_data[9]; unigene = tab_data[10]
#start = tab_data[4]; stop = tab_data[5]; go_b = tab_data[11]; go_c = tab_data[12]; go_f = tab_data[13]
try: pathway = tab_data[14]; domain = tab_data[15]; family = tab_data[16]
except IndexError: pathway = tab_data[14]; domain = tab_data[15]; family = ''
try: gene_annot = string.split(gene_annot,' // '); symbol=gene_annot[1]; definition=gene_annot[2]
except IndexError: ref_seq = ''; symbol = ''; definition = ''
uniprot_list=[]; uniprot_data = string.split(uniprot,' /// ')
for entry in uniprot_data:
if ' // ' in entry: other_id,uniprot_id = string.split(entry,' // '); uniprot_list.append(uniprot_id)
unigene_list=[]; unigene_data = string.split(unigene,' /// ')
for entry in unigene_data:
if ' // ' in entry: other_id,unigene_id,tissue_exp = string.split(entry,' // '); unigene_list.append(unigene_id)
ensembl_list=[]; ensembl_data = string.split(gene_annot2,'gene:ENSG')
for entry in ensembl_data:
if entry[0:2] == '00': ###Ensembl id's will have 0000 following the ENSG
ensembl_data = string.split(entry,' '); ensembl_list.append('ENSG'+ensembl_data[0])
trans_annotation_db[transcript_id] = definition, symbol, ensembl_list
trans_annot_extended[transcript_id] = ensembl_list,unigene_list,uniprot_list #strand,go_b,go_c,go_f,pathway,domain,family
exportTranscriptAnnotations(trans_annot_extended)
return trans_annotation_db
def getProbesetAssociations(filename):
#probeset_db[probeset] = affygene,exons,probe_type_call,ensembl
fn=filepath(filename)
probe_association_db={}; constitutive_ranking = {}; x = 0; exon_location={}; mRNA_associations = {}
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n')
data = string.replace(data,'"',''); y = 0
t = string.split(data,',')
        if data[0] != '#' and 'probeset' in data:
            affy_headers = t
            ### Map each column name to its index (a single scan over the header is sufficient)
            index = 0
            while index < len(affy_headers):
                if 'probeset_id' == affy_headers[index]: pi = index
                if 'start' == affy_headers[index]: st = index
                if 'stop' == affy_headers[index]: sp = index
                if 'level' == affy_headers[index]: lv = index
                if 'exon_id' == affy_headers[index]: ei = index
                if 'transcript_cluster_id' == affy_headers[index]: tc = index
                if 'seqname' == affy_headers[index]: sn = index
                if 'strand' == affy_headers[index]: sd = index
                if 'fl' == affy_headers[index]: fn = index ### note: re-binds the name 'fn' as a column index
                if 'mrna' == affy_headers[index]: mr = index
                if 'est' == affy_headers[index]: es = index
                if 'ensGene' == affy_headers[index]: eg = index
                if 'mrna_assignment' == affy_headers[index]: ma = index
                index += 1
elif data[0] != '#' and 'probeset' not in data:
probeset_id=t[pi];exon_cluster_id=t[ei];transcript_cluster_id=t[tc]; chr=t[sn];strand=t[sd]; mrna_assignment=t[ma]
start=int(t[st]);stop=int(t[sp]); exon_type=t[lv]; fl=int(t[fn]); mRNA=int(t[mr]); est=int(t[es]); ensembl=int(t[eg])
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
###Extract out the mRNA identifiers from the mRNA_annotation field
ensembl_ids, mRNA_ids = grabRNAIdentifiers(mrna_assignment)
mRNA_associations[probeset_id] = ensembl_ids, mRNA_ids
evidence_count = fl+mRNA
if strand == "-": start = int(t[7]); stop = int(t[6])
if exons_to_grab == 'core' and exon_type == 'core': y = 1; x += 1
elif exons_to_grab == 'extended' and (exon_type == 'core' or exon_type == 'extended'): y = 1; x += 1
elif exons_to_grab == 'full': ### includes 'ambiguous' exon_type
y = 1; x += 1
if y == 1:
probe_association_db[probeset_id]=transcript_cluster_id,probeset_id,exon_type
if strand == "-":
new_start = stop; new_stop = start; start = new_start; stop = new_stop
try: exon_location[transcript_cluster_id,chr,strand].append((start,stop,exon_type,probeset_id))
except KeyError: exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
const_info = [ensembl,evidence_count,est,probeset_id]
try: constitutive_ranking[transcript_cluster_id].append(const_info)
except KeyError: constitutive_ranking[transcript_cluster_id] = [const_info]
print "Probeset Associations Parsed"
###Export probeset to transcript annotations
exportProbesetAnnotations(mRNA_associations)
###Optionally grab constitutive annotations based on Ensembl, full-length and EST evidence only
if constitutive_source != 'Affymetrix':
print "Begining to assembl constitutive annotations (Based on Ensembl/FL/EST evidence)..."
alt_constitutive_gene_db,const_probe_count = rankConstitutive(constitutive_ranking)
print "Number of Constitutive genes:",len(alt_constitutive_gene_db),"Number of Constitutive probesets:",const_probe_count
exon_location2 = annotateExons(exon_location)
print "Selected",exons_to_grab,"Probesets:",x
return probe_association_db,exon_location2,alt_constitutive_gene_db
def grabRNAIdentifiers(mrna_assignment):
ensembl_ids=[]; mRNA_ids=[]
mRNA_entries = string.split(mrna_assignment,' /// ')
for entry in mRNA_entries:
mRNA_info = string.split(entry,' // '); mrna_ac = mRNA_info[0]
if 'ENS' in mrna_ac: ensembl_ids.append(mrna_ac)
else:
try: int(mrna_ac[-3:]); mRNA_ids.append(mrna_ac)
except ValueError: continue
ensembl_ids = unique.unique(ensembl_ids)
mRNA_ids = unique.unique(mRNA_ids)
return ensembl_ids, mRNA_ids
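### Usage sketch (hypothetical accession strings; assumes unique.unique() de-duplicates):
#
#   ens,mrna = grabRNAIdentifiers('ENST00000123456 // cdna:known /// NM_000014 // RefSeq')
#   ### ens -> ['ENST00000123456'], mrna -> ['NM_000014']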
def rankConstitutive(constitutive_ranking):
constitutive_gene_db = {}
for key in constitutive_ranking:
c = constitutive_ranking[key]
ens_list=[]; fl_list=[]; est_list=[]
for entry in c:
ens = entry[0]; fl = entry[1]; est = entry[2]; probeset = entry[3]
ens_list.append(ens); fl_list.append(fl); est_list.append(est)
ens_list.sort();ens_list.reverse(); fl_list.sort(); fl_list.reverse(); est_list.sort(); est_list.reverse()
top_ens = ens_list[0]; top_fl = fl_list[0]; top_est = est_list[0]
###Remove EST only gene entries and entries with no mRNA or EST alignment variation
if (top_ens == ens_list[-1]) and (top_fl == fl_list[-1]):
if (top_est != est_list[-1]) and ((top_fl > 0) or (top_ens > 0)):
for entry in c:
if entry[2] == top_est:
const_probeset = entry[3]
try:constitutive_gene_db[key].append(const_probeset)
except KeyError:constitutive_gene_db[key] = [const_probeset]
else: continue
elif top_ens > 1 and top_fl > 1:
for entry in c:
if entry[0] == top_ens:
const_probeset = entry[3]
try:constitutive_gene_db[key].append(const_probeset)
except KeyError:constitutive_gene_db[key] = [const_probeset]
elif top_fl > 1:
for entry in c:
if entry[1] == top_fl:
const_probeset = entry[3]
try:constitutive_gene_db[key].append(const_probeset)
except KeyError:constitutive_gene_db[key] = [const_probeset]
elif top_ens > 1:
for entry in c:
if entry[0] == top_ens:
const_probeset = entry[3]
try:constitutive_gene_db[key].append(const_probeset)
except KeyError:constitutive_gene_db[key] = [const_probeset]
n=0; constitutive_probe_db={}
for key in constitutive_gene_db:
for entry in constitutive_gene_db[key]:
n+=1
return constitutive_gene_db,n
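### Worked sketch (made-up evidence counts; entries are [ensembl,fl+mRNA,est,probeset]):
### when Ensembl and full-length counts are tied across probesets but EST counts differ,
### the probeset with the most EST support is selected.
#
#   ranks = {'TC1': [[2,5,9,'PS1'],[2,5,3,'PS2']]}
#   rankConstitutive(ranks) ### -> ({'TC1': ['PS1']}, 1)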
def annotateExons(exon_location):
    ###Annotate Affymetrix probesets independent of other annotations. A problem with this method
    ###is that it fails to recognize distant probesets that probe different regions of the same exon.
for key in exon_location:
exon_location[key].sort()
strand = key[2]
if strand == "-":
exon_location[key].reverse()
    alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
exon_location2={}
for key in exon_location:
index = 1; index2 = 0; exon_list=[]; y = 0
for exon in exon_location[key]:
if y == 0:
exon_info = 'E'+str(index),exon
exon_list.append(exon_info); y = 1; last_start = exon[0]; last_stop = exon[1]; index += 1; index2 = 0
elif y == 1:
current_start = exon[0]; current_stop = exon[1]
if (abs(last_stop - current_start) < 20) or (abs(last_start - current_start) < 20) or (abs(current_stop - last_stop) < 20): ###Therefore, the two exons are < 20bp away
exon_info = 'E'+str(index-1)+alphabet[index2],exon
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index2 += 1
else:
exon_info = 'E'+str(index),exon
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index += 1; index2 = 0
exon_location2[key] = exon_list
"""for key in exon_location2:
if key[0] == '3242353':
print key, exon_location2[key]"""
return exon_location2
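### Worked sketch (hypothetical coordinates): features within ~20bp of the previous one
### share an exon number and receive letter suffixes (E1, E1a, ...), otherwise the exon
### number increments:
#
#   loc = {('TC1','chr1','+'): [(100,150,'core','PS1'),(160,200,'core','PS2'),(5000,5100,'core','PS3')]}
#   annotateExons(loc)[('TC1','chr1','+')]
#   ### -> [('E1',(100,150,'core','PS1')), ('E1a',(160,200,'core','PS2')), ('E2',(5000,5100,'core','PS3'))]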
################# Select files for analysis and output results
def getDirectoryFiles():
dir = '/AltDatabase/'+species+'/exon'
dir_list = read_directory(dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = dir[1:]+'/'+data
if 'transcript-annot' in affy_data_dir:
transcript_annotation_file = affy_data_dir
elif '.annot' in affy_data_dir:
probeset_transcript_file = affy_data_dir
elif '.probeset' in affy_data_dir:
probeset_annotation_file = affy_data_dir
return probeset_transcript_file,probeset_annotation_file,transcript_annotation_file
"""
def getDirectoryFiles(dir,exons_to_grab):
import_dir = '/AltDatabase/'+species+'/exon'
probeset_annotation_file=''
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = dir[1:]+'/'+data
if 'transcript-annot' in affy_data_dir:
transcript_annotation_file = affy_data_dir
elif 'annot.hg' in affy_data_dir:
probeset_transcript_file = affy_data_dir
elif 'annot.hg' in affy_data_dir:
probeset_annotation_file = affy_data_dir
if exons_to_grab in affy_data_dir: ###this is the choosen constitutive list
constitutive_probe_file = affy_data_dir
return transcript_annotation_file,probeset_transcript_file,probeset_annotation_file,constitutive_probe_file
"""
def eliminateRedundant(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
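### Usage sketch (assumes unique.unique() de-duplicates a list):
#
#   eliminateRedundant({'TC1': ['PS2','PS1','PS1']}) ### -> {'TC1': ['PS1','PS2']}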
def getAnnotations(exons_to_grab,constitutive_source,process_from_scratch):
"""Annotate Affymetrix exon array data using files provided at http://www.Affymetrix.com. Filter these annotations based on
the choice of 'core', 'extended', or 'full' annotations."""
probeset_transcript_file,probeset_annotation_file,transcript_annotation_file = getDirectoryFiles()
probe_association_db, exon_location_db, alt_constitutive_gene_db = getProbesetAssociations(probeset_annotation_file)
    trans_annotation_db = getTranscriptAnnotation(transcript_annotation_file,species,'no',[]) ### four arguments are required; single-gene test mode disabled
    constitutive_db = getConstitutiveProbesets(constitutive_probe_file) ### note: constitutive_probe_file is only returned by the alternate getDirectoryFiles() variant above
if constitutive_source != 'Affymetrix':
return probe_association_db,alt_constitutive_gene_db,exon_location_db, trans_annotation_db, trans_annot_extended
else:
return probe_association_db,constitutive_db,exon_location_db, trans_annotation_db, trans_annot_extended
def exportProbesetAnnotations(mRNA_associations):
probeset_annotation_export = 'AltDatabase/ensembl/' + species + '/'+ species + '_probeset-mRNA_annot.txt'
fn=filepath(probeset_annotation_export); data = open(fn,'w')
title = 'probeset_id'+'\t'+'ensembl_transcript_ids'+'\t'+'mRNA_accession_ids'+'\n'
data.write(title); y=0
for probeset_id in mRNA_associations:
ensembl_ids, mRNA_ids = mRNA_associations[probeset_id]
if len(ensembl_ids)>0 or len(mRNA_ids)>0:
ensembl_ids = string.join(ensembl_ids,','); mRNA_ids = string.join(mRNA_ids,',')
values = probeset_id +'\t'+ ensembl_ids +'\t'+ mRNA_ids +'\n'
data.write(values); y+=1
data.close()
print y, "Probesets linked to mRNA accession numbers exported to text file:",probeset_annotation_export
def exportTranscriptAnnotations(transcript_annotation_db):
transcript_annotation_export = 'AltDatabase/ensembl/' + species + '/'+ species + '_Affygene-external_annot.txt'
fn=filepath(transcript_annotation_export); data = open(fn,'w')
    title = 'transcript_cluster_id'+'\t'+'ensembl_ids'+'\t'+'unigene_ids'+'\t'+'uniprot_ids'+'\n'
data.write(title); y=0
for transcript_cluster_id in transcript_annotation_db:
ensembl_list,unigene_list,uniprot_list = transcript_annotation_db[transcript_cluster_id]
if len(ensembl_list)>0 or len(unigene_list)>0 or len(uniprot_list)>0:
ensembl_ids = string.join(ensembl_list,','); unigene_ids = string.join(unigene_list,','); uniprot_ids = string.join(uniprot_list,',')
values = transcript_cluster_id +'\t'+ ensembl_ids +'\t'+ unigene_ids +'\t'+ uniprot_ids +'\n'
data.write(values); y+=1
data.close()
print y, "Transcript clusters linked to other gene annotations to text file:",transcript_annotation_export
if __name__ == '__main__':
species = 'Hs'
exons_to_grab = "core"
constitutive_source = 'custom'
process_from_scratch = 'yes'
getAnnotations(exons_to_grab,constitutive_source,process_from_scratch)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/ExonArrayAffyRules.py
|
ExonArrayAffyRules.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from stats_scripts import statistics
import copy
import time
import update
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".TXT" or entry[-3:] == ".fa":
dir_list2.append(entry)
return dir_list2
def importSplicingAnnotationDatabase(filename,array_type):
global exon_db; fn=filepath(filename)
print 'importing', filename
exon_db={}; count = 0; x = 0
for line in open(fn,'r').readlines():
probeset_data,null = string.split(line,'\n') #remove endline
if x == 0: x = 1
else:
            try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_probeset, ens_exon_ids, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(probeset_data,'\t')
            except Exception: t = string.split(probeset_data,'\t'); print len(t), t;kill
            #probe_data = AffyExonSTData(probeset_id,ensembl_gene_id,exon_id,ens_exon_ids,transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_probeset, exon_region, splicing_event, splice_junctions)
if ':' in probeset_id:
id1,id2 = string.split(probeset_id,':')
                if id2[0]=='E' or id2[0]=='I': probeset_id = probeset_id ### Retain the full gene:region ID when the suffix is an exon (E) or intron (I) region label
else: probeset_id = id2
### Store exon region start and stop since this corresponds to the critical-exon-region-seq_update.txt file sequence coordinates
probe_data = AffyExonSTDataSimple(probeset_id,ensembl_gene_id,exon_region,ens_exon_ids,probeset_start,probeset_stop,strand,chromosome)
exon_db[probeset_id] = probe_data
print len(exon_db), 'probesets imported'
return exon_db
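### Editor's sketch (not part of the pipeline): the ':' handling above keeps a
### composite ID intact when its suffix is an exon/intron region label and
### otherwise strips the gene prefix. With hypothetical IDs:
###     'ENSG00000215915:E1.2'    -> 'ENSG00000215915:E1.2' (suffix begins with 'E')
###     'ENSG00000215915:2315101' -> '2315101' (plain probeset suffix retained alone)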
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self): return self._probeset
def IncludeProbeset(self):
include_probeset = 'yes'
if self.ArrayType() == 'AltMouse':
if filter_probesets_by == 'exon':
                if '-' in self.ExonID() or '|' in self.ExonID(): ###Therefore the probeset represents an exon-exon junction or multi-exon probeset
include_probeset = 'no'
if filter_probesets_by != 'exon':
if '|' in self.ExonID(): include_probeset = 'no'
if self.ArrayType() == 'exon':
if filter_probesets_by == 'core':
if self.ProbesetClass() != 'core': include_probeset = 'no'
return include_probeset
def ExonID(self): return self._exonid
def GeneID(self): return self._geneid
def ExternalGeneID(self): return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self): return self._block_structure
def SecondaryExonID(self): return self._block_exon_ids
def Chromosome(self): return self._chromosome
def Strand(self): return self._strand
def ProbeStart(self): return int(self._start)
def ProbeStop(self): return int(self._stop)
def ProbesetClass(self):
        ###e.g. core, extended, full
        return self._probeset_class
def ExternalExonIDs(self): return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(),'|')
return external_exonid_list
def Constitutive(self): return self._constitutive_status
def SecondaryGeneID(self): return self._secondary_geneid
def SetExonSeq(self,seq): self._exon_seq = seq
def ExonSeq(self): return string.upper(self._exon_seq)
def SetJunctionSeq(self,seq): self._junction_seq = seq
def JunctionSeq(self): return string.upper(self._junction_seq)
def RecipricolProbesets(self): return self._junction_probesets
def ExonRegionID(self): return self._exon_region
def Report(self):
output = self.Probeset() +'|'+ self.ExternalGeneID()
return output
def __repr__(self): return self.Report()
class AffyExonSTData(SplicingAnnotationData):
    def __init__(self,probeset_id,ensembl_gene_id,exon_id,ens_exon_ids,transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_probeset, exon_region, splicing_event, splice_junctions):
        self._geneid = ensembl_gene_id; self._external_gene = ensembl_gene_id; self._exonid = exon_id
        self._constitutive_status = constitutive_probeset; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids; self._secondary_geneid = transcript_cluster_id; self._chromosome = chromosome
        self._probeset_class = affy_class; self._probeset=probeset_id
self._exon_region=exon_region; self._splicing_event=splicing_event; self._splice_junctions=splice_junctions
if self._exonid[0] == 'U': self._probeset_type = 'UTR'
elif self._exonid[0] == 'E': self._probeset_type = 'exonic'
elif self._exonid[0] == 'I': self._probeset_type = 'intronic'
def ExonRegionID(self): return self._exon_region
def SplicingEvent(self): return self._splicing_event
def SpliceJunctions(self): return self._splice_junctions
def Constitutive(self):
if len(self._splicing_event)>0: return 'no' ###Over-ride affymetrix probeset file annotations if an exon is alternatively spliced
else: return self._constitutive_status
class AffyExonSTDataSimple(SplicingAnnotationData):
def __init__(self,probeset_id,ensembl_gene_id,exon_region,ens_exon_ids,probeset_start,probeset_stop,strand,chr):
self._geneid = ensembl_gene_id; self._external_gene = ensembl_gene_id; self._exon_region = exon_region
self._external_exonids = ens_exon_ids; self._probeset=probeset_id
self._chromosome = chr
try: self._start=int(probeset_start); self._stop = int(probeset_stop)
except Exception: self._start=probeset_start; self._stop = probeset_stop
self._strand = strand
class JunctionDataSimple(SplicingAnnotationData):
def __init__(self,probeset_id,ensembl_gene_id,array_geneid,junction_probesets,critical_exons):
self._geneid = ensembl_gene_id; self._junction_probesets = junction_probesets; self._exonid = critical_exons
self._probeset=probeset_id; self._external_gene = array_geneid; self._external_exonids = ''
def importProbesetSequences(filename,array_type,exon_db,chromosome,species):
#output_file = 'input/'+species+'/filtered_probeset_sequence.txt'
#fn=filepath(output_file)
#datar = open(fn,'w')
print 'importing', filename
probeset_seq_db={}; probesets_parsed=0; probesets_added=0
chromosome = 'chr'+str(chromosome)
print "Begining generic fasta import of",filename
fn=filepath(filename)
sequence = ''; x = 0;count = 0
for line in open(fn,'r').xreadlines():
try: data, newline= string.split(line,'\n')
except ValueError: continue
try:
if data[0] == '>':
try:
try:
y = exon_db[probeset]
sequence = string.upper(sequence)
gene = y.GeneID(); y.SetExonSeq(sequence)
try: probeset_seq_db[gene].append(y)
except KeyError: probeset_seq_db[gene] = [y]
sequence = ''; t= string.split(data,';'); probesets_added+=1
probeset=t[0]; probeset_data = string.split(probeset,':'); probeset = probeset_data[-1]
chr = t[2]; chr = string.split(chr,'='); chr = chr[-1]; probesets_parsed +=1
except KeyError:
sequence = ''; t= string.split(data,';')
probeset=t[0]; probeset_data = string.split(probeset,':'); probeset = probeset_data[-1]
chr = t[2]; chr = string.split(chr,'='); chr = chr[-1]; probesets_parsed +=1
#if chr == chromosome: go = 'yes'
#else: go = 'no'
except UnboundLocalError: ###Occurs for the first entry
t= string.split(data,';')
probeset=t[0]; probeset_data = string.split(probeset,':'); probeset = probeset_data[-1]
chr = t[2]; chr = string.split(chr,'='); chr = chr[-1]; probesets_parsed +=1
#if chr == chromosome: go = 'yes'
#else: go = 'no'
else: sequence = sequence + data
except IndexError: continue
try:
y = exon_db[probeset]
sequence = string.upper(sequence)
gene = y.GeneID(); y.SetExonSeq(sequence)
try: probeset_seq_db[gene].append(y)
except KeyError: probeset_seq_db[gene] = [y]
except KeyError: null=[]
#datar.close()
#exportAssociations(probeset_seq_db,species)
print len(probeset_seq_db), probesets_added, probesets_parsed, len(exon_db)
#print probeset_seq_db['ENSG00000156006']
return probeset_seq_db
def exportAssociations(probeset_seq_db,species):
output_file = 'AltAnalyze/'+species+'/SequenceData/'+species+'/filtered_probeset_sequence.txt'
fn=filepath(output_file)
data = open(fn,'w')
for probeset in probeset_seq_db:
seq = probeset_seq_db[probeset]
data.write(probeset+'\t'+seq+'\n')
data.close()
def getParametersAndExecute(probeset_seq_file,array_type,species):
probeset_annotations_file = 'AltDatabase/'+species+'/exon/'+species+'_Ensembl_probesets.txt'
###Import probe-level associations
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type)
start_time = time.time(); chromosome = 1
    probeset_seq_db = importProbesetSequences(probeset_seq_file,array_type,exon_db,chromosome,species)
if array_type == 'gene':
probeset_annotations_file = string.replace(probeset_annotations_file,'exon','gene')
gene_exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type)
translation_db = exportAllExonToGeneArrayAssociations(exon_db,gene_exon_db)
probeset_seq_db = convertExonProbesetSequencesToGene(probeset_seq_db,gene_exon_db,translation_db)
end_time = time.time(); time_diff = int(end_time-start_time)
print "Analyses finished in %d seconds" % time_diff
return probeset_seq_db
def convertExonProbesetSequencesToGene(probeset_seq_db,gene_exon_db,translation_db):
probeset_gene_seq_db={}; probeset_db = {}
print len(probeset_seq_db), 'gene entries with exon probeset sequence'
for probeset in gene_exon_db:
y = gene_exon_db[probeset]
if probeset in translation_db:
try:
for z in probeset_seq_db[y.GeneID()]:
if z.Probeset() in translation_db[probeset]:
y.SetExonSeq(z.ExonSeq()) ### Use the exon array sequence if the exon regions are the same
try: probeset_gene_seq_db[y.GeneID()].append(y)
except KeyError: probeset_gene_seq_db[y.GeneID()] = [y]
probeset_db[probeset]=[]
except KeyError: null=[]
print len(probeset_db), 'gene probesets annotated with exon sequence'
return probeset_gene_seq_db
def exportAllExonToGeneArrayAssociations(exon_db,gene_exon_db):
    ### The function above essentially does this already, but some exon array probesets may not be sequence matched (probably not necessary)
output_file = 'AltDatabase/'+species+'/gene/'+species+'_gene-exon_probesets.txt'
fn=filepath(output_file)
data = open(fn,'w')
data.write('gene_probeset\texon_probeset\n')
exon_db_gene = {}
for probeset in exon_db:
y = exon_db[probeset]
try: exon_db_gene[y.GeneID()].append(y)
except KeyError: exon_db_gene[y.GeneID()] = [y]
#print gene_exon_db['10411047'].ExonRegionID(),exon_db['4355948'].ExonRegionID()
translation_db={}
for probeset in gene_exon_db:
y = gene_exon_db[probeset]
try:
for z in exon_db_gene[y.GeneID()]:
if z.ExonRegionID() == y.ExonRegionID():
try: translation_db[probeset].append(z.Probeset())
except KeyError: translation_db[probeset] = [z.Probeset()]
except KeyError: null=[]
### Mop-ups (get imperfect relationships)
for probeset in gene_exon_db:
if probeset not in translation_db:
y = gene_exon_db[probeset]
try:
for z in exon_db_gene[y.GeneID()]:
if (z.ExonRegionID()+'|') in (y.ExonRegionID()+'|') or (y.ExonRegionID()+'|') in (z.ExonRegionID()+'|'):
#print z.ExonRegionID(), y.ExonRegionID();kill
try: translation_db[probeset].append(z.Probeset())
except KeyError: translation_db[probeset] = [z.Probeset()]
except KeyError: null=[]
translation_db2={}
for probeset in translation_db:
y = gene_exon_db[probeset]
#if probeset == '10411047': print '******',translation_db[probeset]
for exon_probeset in translation_db[probeset]:
z = exon_db[exon_probeset]
coordinates = [y.ProbeStart(), y.ProbeStop(), z.ProbeStart(), z.ProbeStop()]
coordinates.sort(); include = 'yes'
### Ensures that the coordinates overlap versus are separate
if [y.ProbeStart(), y.ProbeStop()] == coordinates[:2]: include ='no'
elif [y.ProbeStart(), y.ProbeStop()] == coordinates[-2:]: include = 'no'
            ### Include a probeset if the exon and gene probeset locations overlap or if there is only one association
if include == 'yes' or len(translation_db[probeset]) == 1:
data.write(probeset+'\t'+exon_probeset+'\n')
try: translation_db2[probeset].append(exon_probeset)
except Exception: translation_db2[probeset] = [exon_probeset]
if probeset not in translation_db2:
### If nothing got added, then just add the last match
data.write(probeset+'\t'+exon_probeset+'\n')
try: translation_db2[probeset].append(exon_probeset)
except Exception: translation_db2[probeset] = [exon_probeset]
data.close()
print 'Out of all',len(gene_exon_db),'gene array probesets',len(translation_db2), 'had corresponding exon array probesets found.'
return translation_db2
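### Editor's sketch of the overlap test above, using made-up coordinates: a gene
### probeset at 100-200 and an exon probeset at 150-250 give sorted coordinates
### [100,150,200,250]; [100,200] matches neither coordinates[:2] ([100,150]) nor
### coordinates[-2:] ([200,250]), so the pair overlaps and is written. For a
### disjoint pair (100-200 vs. 300-400), [100,200] == coordinates[:2], so the
### association is excluded unless it is the probeset's only match.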
def getExonAnnotationsAndSequence(probeset_seq_file,array_type,species):
probeset_annotations_file = 'AltDatabase/'+species+'/exon/'+species+'_Ensembl_probesets.txt'
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type)
chromosome = 1
###By running this next function, we can update exon_db to include probeset sequence data
array_type=''
    try: probeset_seq_db = importProbesetSequences(probeset_seq_file,array_type,exon_db,chromosome,species)
except IOError: null = []
return exon_db
def import_ensembl_unigene(species):
filename = 'AltDatabase/'+species+'/SequenceData/'+species+'_Ensembl-Unigene.txt'
fn=filepath(filename); unigene_ensembl={}
print 'importing', filename
for line in open(fn,'r').xreadlines():
data, newline= string.split(line,'\n')
ensembl,unigene = string.split(data,'\t')
if len(unigene)>1 and len(ensembl)>1:
try: unigene_ensembl[unigene].append(ensembl)
except KeyError: unigene_ensembl[unigene] = [ensembl]
print 'len(unigene_ensembl)',len(unigene_ensembl)
return unigene_ensembl
def importEnsemblAnnotations(species):
filename = 'AltDatabase/'+species+'/SequenceData/'+species+'_Ensembl-annotations.txt'
fn=filepath(filename); symbol_ensembl={}
print 'importing', filename
for line in open(fn,'r').xreadlines():
data, newline= string.split(line,'\n')
t = string.split(data,'\t'); ensembl = t[0]
try: symbol = t[2]
except IndexError: symbol = ''
if len(symbol)>1 and len(ensembl)>1:
try: symbol_ensembl[symbol].append(ensembl)
except KeyError: symbol_ensembl[symbol] = [ensembl]
print 'len(symbol_ensembl)',len(symbol_ensembl)
symbol_ensembl = eliminate_redundant_dict_values(symbol_ensembl)
return symbol_ensembl
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
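### Editor's sketch of the helper above with a hypothetical input (assuming
### unique.unique() behaves like list(set(...))):
###     >>> eliminate_redundant_dict_values({'ENSG1': ['Hs.5','Hs.2','Hs.5']})
###     {'ENSG1': ['Hs.2', 'Hs.5']}
### Values are deduplicated and sorted per key; keys are left untouched.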
def importmiRNATargetPredictionsAdvanced(species):
filename = 'AltDatabase/'+species+'/SequenceData/miRBS-combined_gene-target-sequences.txt'
print 'importing', filename; count = 0
source_db={} ### collect stats on the input file sources
fn=filepath(filename); ensembl_mirna_db={}; unigene_ids={}; x = 4000; z=0; nulls={}
for line in open(fn,'r').xreadlines():
line = string.replace(line,"'",''); line = string.replace(line,'"','')
data, newline = string.split(line,'\n')
microrna,ensembl,three_prime_utr_anchor_site_seq,sources = string.split(data,'\t')
sources_list = string.split(sources,'|')
for source in sources_list:
try: source_db[source]+=1
except Exception: source_db[source]=1
y = MicroRNAData(ensembl,microrna,three_prime_utr_anchor_site_seq,sources)
count+=1
#y = microrna,three_prime_utr_anchor_site_seq,sources
try: ensembl_mirna_db[ensembl].append(y)
except KeyError: ensembl_mirna_db[ensembl] = [y]
ensembl_mirna_db = eliminate_redundant_dict_values(ensembl_mirna_db)
print count, "microRNA to target relationships imported"
for source in source_db:
print source,':',source_db[source],
print ''
return ensembl_mirna_db
def importmiRNATargetPredictions(species):
unigene_ensembl = import_ensembl_unigene(species)
symbol_ensembl = importEnsemblAnnotations(species)
filename = 'AltDatabase/'+species+'/SequenceData//microRNA-target-annotated.txt'
print 'importing', filename
fn=filepath(filename); ensembl_mirna_db={}; unigene_ids={}; x = 4000; z=0; nulls={};k=0
for line in open(fn,'r').xreadlines():
if k == 0:k=1
else:
line = string.replace(line,"'",''); line = string.replace(line,'"','')
data, newline = string.split(line,'\n')
refseqid, unigene, symbol, symbol2, llid, llrepprotacc, microrna, rank, pictar_score, annotation, num_anchor_sites, three_prime_utr_anchor_site_seq = string.split(data,'\t')
sequences = string.split(three_prime_utr_anchor_site_seq,' ')
ensembls = []
unigene_ids[unigene]=[]
if unigene in unigene_ensembl: ensembls = unigene_ensembl[unigene]
elif symbol in symbol_ensembl: ensembls = symbol_ensembl[symbol]
if len(ensembls)>5: print len(ensembls)
for ensembl in ensembls:
for sequence in sequences:
y = MicroRNAData(ensembl,microrna,sequence,'')
#y = microrna,three_prime_utr_anchor_site_seq,''
try: ensembl_mirna_db[ensembl].append(y)
except KeyError: ensembl_mirna_db[ensembl] = [y]
if len(ensembls)<1: nulls[unigene] = []
ensembl_mirna_db = eliminate_redundant_dict_values(ensembl_mirna_db)
print len(ensembl_mirna_db), "genes with associated microRNA data out of",len(unigene_ids)
return ensembl_mirna_db
class MicroRNAData:
def __init__(self, gene_id, microrna, sequence, source):
self._gene_id = gene_id; self._microrna = microrna; self._sequence = sequence; self._source = source
self.start_set = []
self.end_set = []
self.exon_sets=[]
def GeneID(self): return self._gene_id
def MicroRNA(self): return self._microrna
def Sequence(self): return self._sequence
def Source(self): return self._source
def setEnsExons(self,ens_exons): self.exon_sets+=ens_exons
def EnsExons(self):
exonsets_unique = unique.unique(self.exon_sets)
return string.join(exonsets_unique,'|')
def setCoordinates(self,chr,strand,start,end):
self.start_set.append(start)
self.end_set.append(end)
self.chr = chr
self.strand = strand
def Coordinates(self):
x=0; coords=[]
for i in self.start_set:
coord = self.Chr()+':'+str(i)+'-'+str(self.end_set[x])
coords.append(coord)
x+=1
coords = unique.unique(coords)
coords = string.join(coords,'|') ###If multiple coordinates
return coords
def Chr(self): return self.chr
def Strand(self): return self.strand
def SummaryValues(self):
output = self.GeneID()+'|'+self.MicroRNA()+'|'+self.Sequence()
return output
def __repr__(self): return self.SummaryValues()
def alignmiRNAData(array_type,mir_source,species,stringency,ensembl_mirna_db,splice_event_db):
output_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_ensembl_microRNAs.txt'
if mir_source == 'pictar': fn=filepath(output_file); data = open(fn,'w')
print "Aligning microRNAs to probesets"
    added = {} ### Tracks (gene,miRNA,sequence) triples already written, to avoid duplicate rows
probeset_miRNA_db={}
for gene in ensembl_mirna_db:
for y in ensembl_mirna_db[gene]:
miRNA = y.MicroRNA(); miRNA_seq = y.Sequence(); sources = y.Source()
if mir_source == 'pictar':
if (gene,miRNA,miRNA_seq) not in added:
data.write(gene+'\t'+miRNA+'\t'+miRNA_seq+'\n'); added[(gene,miRNA,miRNA_seq)]=[]
if gene in splice_event_db:
for ed in splice_event_db[gene]:
probeset = ed.Probeset()
probeset_seq = ed.ExonSeq()
#exonid = ed.ExonID()
proceed = 'no'; hit = 'no'
if len(miRNA_seq)>0 and len(probeset_seq)>0:
miRNA_seqs = string.split(miRNA_seq,'|') ### Can be multiples
for mseq in miRNA_seqs:
if mseq in probeset_seq:
hit = 'yes'
if stringency == 'strict':
if '|' in sources: proceed='yes'
else: proceed='yes'
if proceed == 'yes' and hit == 'yes':
                        yc = getMicroRNAGenomicCoordinates(ed,y) ### Adds genomic coordinate information to these objects
try: probeset_miRNA_db[probeset].append(yc)
except KeyError: probeset_miRNA_db[probeset] = [yc]
if mir_source == 'pictar': data.close()
probeset_miRNA_db = eliminate_redundant_dict_values(probeset_miRNA_db)
if stringency == 'lax': export_type = 'any'
else: export_type = 'multiple'
output_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset_microRNAs_'+export_type+'.txt'
coord_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset_coord_microRNAs_'+export_type+'.txt'
k=0
fn=filepath(output_file)
cfn=filepath(coord_file)
data = open(fn,'w'); coord_data = open(cfn,'w')
for probeset in probeset_miRNA_db:
for y in probeset_miRNA_db[probeset]:
miRNA = y.MicroRNA(); miRNA_seq = y.Sequence(); sources = y.Source()
data.write(probeset+'\t'+miRNA+'\t'+miRNA_seq+'\t'+sources+'\n')
coord_data.write(probeset+'\t'+miRNA+'\t'+miRNA_seq+'\t'+sources+'\t'+y.GeneID()+'\t'+y.Coordinates()+'\t'+y.EnsExons()+'\n')
k+=1
data.close(); coord_data.close()
print k, 'entries written to', output_file
def getMicroRNAGenomicCoordinates(ed,y):
### This function is used for internal QC
miRNA = y.MicroRNA(); miRNA_seq = y.Sequence(); sources = y.Source()
ps_start = ed.ProbeStart(); ps_end = ed.ProbeStop(); strand = ed.Strand(); chr = ed.Chromosome()
probeset_seq = ed.ExonSeq()
mi_start = string.find(probeset_seq,miRNA_seq)
mi_end = mi_start+len(miRNA_seq)
if strand == '+':
genomic_start = ps_start+mi_start
genomic_end = ps_start+mi_end
else:
genomic_start = ps_end-mi_start
genomic_end = ps_end-mi_end
yc = copy.deepcopy(y) ### Multiple sites for the same miRNA can exist per gene
yc.setCoordinates(chr,strand,genomic_start,genomic_end)
yc.setEnsExons(ed.ExternalExonIDList())
return yc
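### Editor's sketch of the coordinate arithmetic above with made-up values: for
### a '+' strand probeset starting at genomic position 1000 whose sequence
### contains an 8 nt miRNA seed at string offset 25, string.find() returns 25,
### so genomic_start = 1000+25 = 1025 and genomic_end = 1000+33 = 1033. On the
### '-' strand the offsets are instead subtracted from the probeset end.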
def checkProbesetSequenceFile(species):
""" Check to see if the probeset sequence file is present and otherwise download AltAnalyze hosted version"""
probeset_seq_file = getProbesetSequenceFile(species)
if probeset_seq_file == None:
dir = '/AltDatabase/'+species+'/'+array_type
filename = update.getFileLocations(species,'exon_seq')
filename = dir[1:]+'/'+ filename
update.downloadCurrentVersion(filename,'exon','.fa')
probeset_seq_file = getProbesetSequenceFile(species)
return probeset_seq_file
def getProbesetSequenceFile(species):
probeset_seq_file = None
import_dir = '/AltDatabase'+'/'+species+'/exon'
filedir = import_dir[1:]+'/'
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for input_file in dir_list: #loop through each file in the directory to output results
### No probeset sequence file currently for gene arrays, so use the same exon region probeset sequence from the exon array
if '.probeset.fa' in input_file: probeset_seq_file = filedir+input_file
return probeset_seq_file
def runProgram(Species,Array_type,Process_microRNA_predictions,miR_source,Stringency):
global species; species = Species; global array_type; array_type = Array_type
global process_microRNA_predictions; process_microRNA_predictions = Process_microRNA_predictions
global mir_source; mir_source = miR_source
global stringency; stringency = Stringency
probeset_seq_file = checkProbesetSequenceFile(species)
probeset_annotations_file = 'AltDatabase/'+species+'/exon/'+species+'_Ensembl_probesets.txt'
splice_event_db = getParametersAndExecute(probeset_seq_file,array_type,species)
if process_microRNA_predictions == 'yes':
print 'stringency:',stringency
if mir_source == 'pictar':
ensembl_mirna_db = importmiRNATargetPredictions(species)
else:
ensembl_mirna_db = importmiRNATargetPredictionsAdvanced(species)
alignmiRNAData(array_type,mir_source,species,stringency,ensembl_mirna_db,splice_event_db)
if __name__ == '__main__':
species = 'Hs'; array_type = 'exon'
process_microRNA_predictions = 'yes'
mir_source = 'multiple'; stringency = 'strict'
print array_type
runProgram(species,array_type,process_microRNA_predictions,mir_source,stringency)
stringency = 'lax'
runProgram(species,array_type,process_microRNA_predictions,mir_source,stringency); sys.exit()
print "******Select Species*******"
print "1) Human"
print "2) Mouse"
print "3) Rat"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == '1': species = 'Hs'
if inp == '2': species = 'Mm'
if inp == '3': species = 'Rn'
print "******Source Data*******"
print "1) Multiple miR database sources"
print "2) PicTar"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == '1': mir_source = 'multiple'
if inp == '2': mir_source = 'pictar'
print "******Analysis Stringency*******"
print "1) Atleast two overlapping probeset-miR binding site predictions"
print "2) Any probeset-miR binding site predictions (lax)"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == '1': stringency = 'strict'
if inp == '2': stringency = 'lax'
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/ExonSeqModule.py
|
ExonSeqModule.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import export
dirfile = unique
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".all" or entry[-5:] == ".data" or entry[-3:] == ".fa":
dir_list2.append(entry)
return dir_list2
def returnDirectories(sub_dir):
dir=os.path.dirname(dirfile.__file__)
dir_list = os.listdir(dir + sub_dir)
###Below code used to prevent FILE names from being included
dir_list2 = []
for entry in dir_list:
if "." not in entry: dir_list2.append(entry)
return dir_list2
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term in data_dir: matches.append(data_dir)
return matches
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
############### Main Program ###############
def importAnnotationData(filename):
fn=filepath(filename); x=1
global gene_symbol_db; gene_symbol_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene = t[0]
try: symbol = t[1]
except IndexError: symbol = ''
if len(symbol)>0: gene_symbol_db[gene] = symbol
def importGeneData(filename,data_type):
fn=filepath(filename); x=0; gene_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:x=1
else:
proceed = 'yes'
if data_type == 'junction': gene, region5, region3 = t; value_str = region5+':'+region3
if data_type == 'feature':
probeset, gene, feature, region = t; value_str = region,feature+':'+region+':'+probeset ###access region data later
#if (gene,region) not in region_db: region_db[gene,region] = feature,probeset ### Needed for processed structure table (see two lines down)
try: region_db[gene,region].append((feature,probeset)) ### Needed for processed structure table (see two lines down)
except KeyError: region_db[gene,region] = [(feature,probeset)]
try: region_count_db[(gene,region)]+=1
except KeyError: region_count_db[(gene,region)]=1
###have to add in when parsing structure probeset values for nulls (equal to 0)
if data_type == 'structure':
gene, exon, type, block, region, const, start, annot = t; region_id = exon
if len(annot)<1: annot = '---'
if (gene,exon) in region_db:
probeset_data = region_db[(gene,exon)]
for (feature,probeset) in probeset_data:
count = str(region_count_db[(gene,exon)]) ###here, feature is the label (reversed below)
value_str = feature+':'+exon+':'+probeset+':'+type+':'+count+':'+const+':'+start+':'+annot
if gene in gene_symbol_db: ###Only incorporate gene data with a gene symbol, since Cytoscape currently requires this
try: gene_db[gene].append(value_str)
except KeyError: gene_db[gene] = [value_str]
proceed = 'no'
                else: ### Occurs when no probeset is present: e.g., the imaginary first and last UTR region if it doesn't exist
feature = exon ###feature contains the region information, exon is the label used in Cytoscape
exon,null = string.split(exon,'.')
probeset = '0'
count = '1'
null_value_str = exon,exon+':'+feature+':'+probeset ###This is how Alex has it... to display the label without the '.1' first
try: feature_db[gene].append(null_value_str)
except KeyError: feature_db[gene] = [null_value_str]
value_str = exon+':'+feature+':'+probeset+':'+type+':'+count+':'+const+':'+start+':'+annot
if gene in structure_region_db:
order_db = structure_region_db[gene]
order_db[exon] = block
else:
order_db = {}
order_db[exon] = block
structure_region_db[gene] = order_db
if gene in gene_symbol_db and proceed == 'yes': ###Only incorporate gene data with a gene symbol, since Cytoscape currently requires this
try: gene_db[gene].append(value_str)
except KeyError: gene_db[gene] = [value_str]
return gene_db
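### Editor's note: the expected tab-delimited row shapes for importGeneData,
### inferred from the unpacking above (field names are illustrative only):
###     junction:  gene \t region5 \t region3
###     feature:   probeset \t gene \t feature \t region
###     structure: gene \t exon \t type \t block \t region \t const \t start \t annot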
def exportData(gene_db,data_type,species):
export_file = 'AltDatabase/ensembl/SubGeneViewer/'+species+'/Xport_sgv_'+data_type+'.csv'
if data_type == 'feature': title = 'gene'+'\t'+'symbol'+'\t'+'sgv_feature'+'\n'
if data_type == 'structure': title = 'gene'+'\t'+'symbol'+'\t'+'sgv_structure'+'\n'
if data_type == 'splice': title = 'gene'+'\t'+'symbol'+'\t'+'sgv_splice'+'\n'
data = export.createExportFile(export_file,'AltDatabase/ensembl/SubGeneViewer/'+species)
#fn=filepath(export_file); data = open(fn,'w')
data.write(title)
for gene in gene_db:
try:
symbol = gene_symbol_db[gene]
value_str_list = gene_db[gene]
value_str = string.join(value_str_list,',')
values = string.join([gene,symbol,value_str],'\t')+'\n'; data.write(values)
except KeyError: null = []
data.close()
print "exported to",export_file
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def reorganizeData(species):
global region_db; global region_count_db; global structure_region_db; global feature_db
region_db={}; region_count_db={}; structure_region_db={}
import_dir = '/AltDatabase/ensembl/'+species
g = GrabFiles(); g.setdirectory(import_dir)
exon_struct_file = g.searchdirectory('exon-structure')
feature_file = g.searchdirectory('feature-data')
junction_file = g.searchdirectory('junction-data')
annot_file = g.searchdirectory('Ensembl-annotations.')
importAnnotationData(annot_file[0])
### Run the files through the same function which has options for different pieces of data. Feature data is processed a bit differently
### since fake probeset data is supplied for intron and UTR features not probed for
splice_db = importGeneData(junction_file[0],'junction')
feature_db = importGeneData(feature_file[0],'feature')
structure_db = importGeneData(exon_struct_file[0],'structure')
for gene in feature_db:
order_db = structure_region_db[gene]
temp_list0 = []; temp_list = []; rank = 1
for (region,value_str) in feature_db[gene]:
            ###First, we have to get the existing order... this is important because when we sort, it screws up ranking within an intron with many probesets
temp_list0.append((rank,region,value_str)); rank+=1
for (rank,region,value_str) in temp_list0:
try: block_number = order_db[region]
except KeyError: print gene, region, order_db;kill
temp_list.append((int(block_number),rank,value_str)) ###Combine the original ranking plus the ranking included from taking into account regions not covered by probesets
temp_list.sort()
temp_list2 = []
for (block,rank,value_str) in temp_list:
temp_list2.append(value_str)
feature_db[gene] = temp_list2
exportData(splice_db,'splice',species)
exportData(structure_db,'structure',species)
exportData(feature_db,'feature',species)
if __name__ == '__main__':
dirfile = unique
species = 'Hs'
reorganizeData(species)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/SubGeneViewerExport.py
|
SubGeneViewerExport.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
def filepath(filename):
return filename
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
#data = string.replace(data,'"','')
return data
def findParentDir(filename):
    ### [::-1] reverses the string
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'//','/') ### If /// present
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[:x]
def findFilename(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'//','/') ### If /// present
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[x:]
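### Editor's sketch of the [::-1] trick used above: string.find(filename[::-1],'/')
### measures the distance of the last '/' from the end of the path, and the *-1
### turns it into a negative slice index. For the hypothetical path
### '/data/input/probes.csv', x = -10, so findParentDir() returns '/data/input/'
### and findFilename() returns 'probes.csv'.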
def convertAffyFormatToBED(filename, ConversionDB=None):
print 'processing:',filename
parent = findParentDir(filename)
if ConversionDB==None:
output_file = 'simple_chr.bed'
else:
output_file = findFilename(filename)
output_file = string.replace(output_file,'mm9','mm10')
export_obj = open(parent+'/'+output_file,'w')
fn=filepath(filename); entry_count=0; readfiles = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if data[0]=='#': readfiles = False
elif readfiles==False:
readfiles = True
if ConversionDB!=None:
export_obj.write(line) ### Write header
else:
try:
t = string.split(data[1:-1],'","')
probeset_id,chr,strand,start,stop = t[:5]
int(start)
if ConversionDB==None:
if 'chr' in chr:
export_obj.write(chr+'\t'+start+'\t'+stop+'\t'+probeset_id+'\n')
else:
chr,start,stop = ConversionDB[probeset_id]
t = [probeset_id,chr,strand,start,stop] + t[5:]
values = '"'+string.join(t,'","')+'"\n'
export_obj.write(values)
entry_count+=1
except Exception:
pass
export_obj.close()
print entry_count, 'entries saved to:',parent+'/'+output_file
def importConvertedBED(filename):
print 'processing:',filename
parent = findParentDir(filename)
fn=filepath(filename); entry_count=0; newCoordinates={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if data[0]!='#':
try:
t = string.split(data,'\t')
chr,start,stop,probeset_id = t
int(start)
if 'chr' in chr:
entry_count+=1
newCoordinates[probeset_id] = chr,start,stop
            except ValueError: ### Raised by int(start) or the unpacking for malformed rows
pass
print entry_count, 'imported and saved.'
return newCoordinates
filename = '/Users/saljh8/Downloads/MoGene-1_0-st-v1-1/MoGene-1_0-st-v1.na33.2.mm9.probeset.csv'
bed_output = '/Users/saljh8/Downloads/MoGene-1_0-st-v1-1/input.bed'
#convertAffyFormatToBED(filename);sys.exit()
newCoordinates = importConvertedBED(bed_output)
convertAffyFormatToBED(filename,ConversionDB=newCoordinates)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/LiftOverAffy.py
|
LiftOverAffy.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains methods for reading the Ensembl SQL FTP directory structure to
identify appropriate species and systems for download for various versions of Ensembl,
download the necessary database files to reconstruct any BioMart annotation files and
determine genomic coordinates for the start and end positions of protein domains."""
try: clearall()
except NameError: null = [] ### Occurs when re-running the script to clear all global variables
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import export
import traceback
import update; reload(update)
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv": dir_list2.append(entry)
return dir_list2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def getChrGeneOnly(Species,configType,ensembl_version,force):
global species; species = Species
global rewrite_existing; rewrite_existing = 'yes'
print 'Downloading Ensembl flat files for parsing from Ensembl SQL FTP server...'
global ensembl_build
ensembl_sql_dir, ensembl_sql_description_dir = getEnsemblSQLDir(ensembl_version)
print ensembl_sql_dir
ensembl_build = string.split(ensembl_sql_dir,'core')[-1][:-1]
sql_file_db,sql_group_db = importEnsemblSQLInfo(configType) ###Import the Config file with the files and fields to parse from the downloaded SQL files
filtered_sql_group_db={} ### Get only these tables
filtered_sql_group_db['Primary'] = ['gene.txt','gene_stable_id.txt','seq_region.txt']
filtered_sql_group_db['Description'] = [ensembl_sql_description_dir]
sq = EnsemblSQLInfo(ensembl_sql_description_dir, 'EnsemblSQLDescriptions', 'Description', '', '', '')
sql_file_db['Primary',ensembl_sql_description_dir] = sq
output_dir = 'AltDatabase/ensembl/'+species+'/EnsemblSQL/'
importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,filtered_sql_group_db,sql_file_db,output_dir,'Primary',force) ###Download and import the Ensembl SQL files
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze':
parent_dir = 'AltDatabase/goelite/'
else:
parent_dir = 'Databases/'
output_dir = parent_dir+species+'/uid-gene/Ensembl-chr.txt'
headers = ['Ensembl Gene ID', 'Chromosome']
values_list=[]
for geneid in gene_db:
gi = gene_db[geneid]
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
seq_region_id = gi.SeqRegionId()
chr = seq_region_db[seq_region_id].Name()
values = [ens_gene,chr]
values_list.append(values)
exportEnsemblTable(values_list,headers,output_dir)
def getGeneTranscriptOnly(Species,configType,ensembl_version,force):
global species; species = Species
global rewrite_existing; rewrite_existing = 'yes'
print 'Downloading Ensembl flat files for parsing from Ensembl SQL FTP server...'
global ensembl_build
ensembl_sql_dir, ensembl_sql_description_dir = getEnsemblSQLDir(ensembl_version)
print ensembl_sql_dir
ensembl_build = string.split(ensembl_sql_dir,'core')[-1][:-1]
sql_file_db,sql_group_db = importEnsemblSQLInfo(configType) ###Import the Config file with the files and fields to parse from the downloaded SQL files
filtered_sql_group_db={} ### Get only these tables
filtered_sql_group_db['Primary'] = ['gene.txt','gene_stable_id.txt','transcript.txt','transcript_stable_id.txt']
filtered_sql_group_db['Description'] = [ensembl_sql_description_dir]
sq = EnsemblSQLInfo(ensembl_sql_description_dir, 'EnsemblSQLDescriptions', 'Description', '', '', '')
sql_file_db['Primary',ensembl_sql_description_dir] = sq
output_dir = 'AltDatabase/ensembl/'+species+'/EnsemblSQL/'
additionalFilter = ['transcript','gene']
importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,filtered_sql_group_db,sql_file_db,output_dir,'Primary',force) ###Download and import the Ensembl SQL files
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze':
parent_dir = 'AltDatabase/goelite/'
else:
parent_dir = 'Databases/'
output_dir = parent_dir+species+'/uid-gene/Ensembl-EnsTranscript.txt'
headers = ['Ensembl Gene ID', 'Ensembl Transcript ID']
values_list=[]
for transcript_id in transcript_db:
ti = transcript_db[transcript_id]
try: ens_gene = gene_db[ti.GeneId()].StableId()
except Exception: ens_gene = gene_stable_id_db[ti.GeneId()].StableId()
try: ens_transcript = transcript_db[transcript_id].StableId()
except Exception: ens_transcript = transcript_stable_id_db[transcript_id].StableId()
values = [ens_gene,ens_transcript]
values_list.append(values)
exportEnsemblTable(values_list,headers,output_dir)
def getEnsemblSQLDir(ensembl_version):
if 'Plus' in ensembl_version:
ensembl_version = string.replace(ensembl_version,'Plus','')
if 'EnsMart' in ensembl_version:
ensembl_version = string.replace(ensembl_version, 'EnsMart','')
original_version = ensembl_version
if 'Plant' in ensembl_version:
ensembl_version = string.replace(ensembl_version, 'Plant','')
if 'Bacteria' in ensembl_version:
ensembl_version = string.replace(ensembl_version, 'Bacteria','')
if 'Fungi' in ensembl_version:
ensembl_version = string.replace(ensembl_version, 'Fungi','')
try:
check = int(ensembl_version)
import UI; UI.exportDBversion('EnsMart'+ensembl_version)
ensembl_version = 'release-'+ensembl_version
except Exception: print 'EnsMart version name is incorrect (e.g., should be "60")'; sys.exit()
import UI; species_names = UI.getSpeciesInfo()
try:
species_full = species_names[species]
ens_species = string.replace(string.lower(species_full),' ','_')
except Exception:
ens_species = species
species_full = species
try: child_dirs, ensembl_species, ensembl_versions = getCurrentEnsemblSpecies(original_version)
except Exception:
print "\nPlease try a different version. This one does not appear to be valid."
print traceback.format_exc();sys.exit()
ensembl_sql_dir,ensembl_sql_description_dir = child_dirs[species_full]
return ensembl_sql_dir, ensembl_sql_description_dir
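### Editor's sketch of the version-string normalization above (hypothetical
### inputs): 'EnsMart72' and 'EnsMartPlus72' both reduce to 'release-72', while
### 'EnsMartPlant22' strips the 'Plant' genome-collection tag before the numeric
### check but passes the retained 'Plant22' to getCurrentEnsemblSpecies(). Any
### non-numeric leftover triggers the version-name error and exits.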
def buildEnsemblRelationalTablesFromSQL(Species,configType,analysisType,externalDBName,ensembl_version,force,buildCommand=None):
import UI; import update; global external_xref_key_db; global species; species = Species
global system_synonym_db; system_synonym_db={} ### Currently only used by GO-Elite
global rewrite_existing; rewrite_existing = 'yes'; global all_external_ids; all_external_ids={}; global added_systems; added_systems={}
original_version = ensembl_version
print 'Downloading Ensembl flat files for parsing from Ensembl SQL FTP server...'
### Get version directories for Ensembl
global ensembl_build
ensembl_sql_dir, ensembl_sql_description_dir = getEnsemblSQLDir(ensembl_version)
ensembl_build = string.split(ensembl_sql_dir,'core')[-1][:-1]
sql_file_db,sql_group_db = importEnsemblSQLInfo(configType) ###Import the Config file with the files and fields to parse from the downloaded SQL files
sql_group_db['Description'] = [ensembl_sql_description_dir]
sq = EnsemblSQLInfo(ensembl_sql_description_dir, 'EnsemblSQLDescriptions', 'Description', '', '', '')
sql_file_db['Primary',ensembl_sql_description_dir] = sq
output_dir = 'AltDatabase/ensembl/'+species+'/EnsemblSQL/'
importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Primary',force) ###Download and import the Ensembl SQL files
if analysisType != 'ExternalOnly':
### Build and export the basic Ensembl gene annotation table
xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Xref',force)
buildEnsemblGeneAnnotationTable(species,xref_db)
if analysisType == 'ExternalOnly':
###Export data for Ensembl-External gene system
buildFilterDBForExternalDB(externalDBName)
xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Xref',force)
external_xref_key_db = xref_db; #resetExternalFilterDB()
object_xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Object-Xref',force)
export_status='yes';output_id_type='Gene'
buildEnsemblExternalDBRelationshipTable(externalDBName,xref_db,object_xref_db,output_id_type,species)
if analysisType == 'AltAnalyzeDBs':
###Export data for Ensembl-External gene system
buildTranscriptStructureTable()
if buildCommand == 'exon':
return None
###Export transcript biotype annotations (e.g., NMD)
exportTranscriptBioType()
###Get Interpro AC display name
buildFilterDBForExternalDB('Interpro')
xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Xref',force)
getDomainGenomicCoordinates(species,xref_db)
if force == 'yes':
        ### Download transcript sequences
getEnsemblTranscriptSequences(original_version,species)
def getEnsemblTranscriptSequences(ensembl_version,species,restrictTo=None):
    ### Download sequence data
import UI; species_names = UI.getSpeciesInfo()
species_full = species_names[species]
ens_species = string.replace(string.lower(species_full),' ','_')
if 'release-' not in ensembl_version:
ensembl_version = 'release-'+ensembl_version
if restrictTo == None or restrictTo == 'protein':
dirtype = 'fasta/'+ens_species+'/pep'
if 'Plant' in ensembl_version or 'Bacteria' in ensembl_version or 'Fungi' in ensembl_version:
ensembl_protseq_dir = getCurrentEnsemblGenomesSequences(ensembl_version,dirtype,ens_species)
else:
ensembl_protseq_dir = getCurrentEnsemblSequences(ensembl_version,dirtype,ens_species)
output_dir = 'AltDatabase/ensembl/'+species + '/'
gz_filepath, status = update.download(ensembl_protseq_dir,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
if restrictTo == None or restrictTo == 'cDNA':
dirtype = 'fasta/'+ens_species+'/cdna'
if 'Plant' in ensembl_version or 'Bacteria' in ensembl_version or 'Fungi' in ensembl_version:
ensembl_cdnaseq_dir = getCurrentEnsemblGenomesSequences(ensembl_version,dirtype,ens_species)
else:
ensembl_cdnaseq_dir = getCurrentEnsemblSequences(ensembl_version,dirtype,ens_species)
output_dir = 'AltDatabase/'+species + '/SequenceData/'
#print [ensembl_cdnaseq_dir]
gz_filepath, status = update.download(ensembl_cdnaseq_dir,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
def getFullGeneSequences(ensembl_version,species):
import UI; species_names = UI.getSpeciesInfo()
try:
species_full = species_names[species]
ens_species = string.replace(string.lower(species_full),' ','_')
except Exception:
ens_species = species
species_full = species
if 'EnsMart' in ensembl_version:
ensembl_version = string.replace(ensembl_version, 'EnsMart','')
if 'release-' not in ensembl_version:
ensembl_version = 'release-'+ensembl_version
dirtype = 'fasta/'+ens_species+'/dna'
if 'Plant' in ensembl_version or 'Bacteria' in ensembl_version or 'Fungi' in ensembl_version:
ensembl_dnaseq_dirs = getCurrentEnsemblGenomesSequences(ensembl_version,dirtype,ens_species)
else:
ensembl_dnaseq_dirs = getCurrentEnsemblSequences(ensembl_version,dirtype,ens_species)
output_dir = 'AltDatabase/'+species + '/SequenceData/chr/'
global dna
dna = export.ExportFile(output_dir+species+'_gene-seq-2000_flank.fa')
print 'Writing gene sequence file plus 2000 flanking base-pairs'
from build_scripts import EnsemblImport
chr_gene_db,location_gene_db = EnsemblImport.getEnsemblGeneLocations(species,'','key_by_chromosome')
for (chr_file,chr_url) in ensembl_dnaseq_dirs:
#if 'MT' in chr_file:
if 'toplevel' in chr_file:
gz_filepath, status = update.download(chr_url,output_dir,'')
            dna_db = divideUpTopLevelChrFASTA(output_dir+chr_file[:-3],output_dir,'store') ### The 'write' mode would also work, but is too file intensive
print len(dna_db), 'scaffolds read into memory'
for chr in chr_gene_db:
if chr in dna_db:
#print chr,'found'
parseChrFASTA(dna_db[chr],chr,chr_gene_db[chr],location_gene_db)
else: print chr,'not found'
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except Exception: null=[]
try: os.remove(gz_filepath[:-3]) ### Delete the chromosome sequence file
except Exception: null=[]
else:
chr=string.split(chr_file,'.')[-3]
if chr in chr_gene_db:
gz_filepath, status = update.download(chr_url,output_dir,'')
parseChrFASTA(output_dir+chr_file[:-3],chr,chr_gene_db[chr],location_gene_db)
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except Exception: null=[]
try: os.remove(gz_filepath[:-3]) ### Delete the chromosome sequence file
except Exception: null=[]
dna.close()
def divideUpTopLevelChrFASTA(filename,output_dir,action):
""" When individual chromosome files are not present, parse the TopLevel file which contains segments divided by scaffold rather
than individual chromosomes. Since the Scaffolds are reported rather than the chromosomes for genes in the other Ensembl annotation
files, the scaffolds can serve as a surrogate for individual chromosome files when parsed out."""
fn=filepath(filename); fasta=[]; sdna=None
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if '>' == data[0]:
### Save values from previous scaffold
if sdna != None:
if action == 'write':
for ln in fasta: sdna.write(ln)
sdna.close()
else: sdna[scaffold_id] = [scaffold_line]+fasta
#if scaffold_id == 'scaffold_753': print scaffold_id, scaffold_line, fasta;kill
fasta=[]
else: sdna = {} ### performed once
### Create new export object
scaffold_id=string.split(data[1:],' ')[0]; scaffold_line = line
if action == 'write':
export_dir = string.replace(filename,'toplevel','chromosome.'+scaffold_id)
sdna = export.ExportFile(export_dir)
sdna.write(line)
else: fasta.append(line)
if action == 'write':
for ln in fasta: sdna.write(ln)
sdna.close()
else: sdna[scaffold_id] = [scaffold_line]+fasta
return sdna
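### Editor's note on the 'store' action above: sdna is returned as a dictionary
### of raw FASTA lines keyed by scaffold name, e.g. (hypothetical scaffold)
###     {'scaffold_753': ['>scaffold_753 dna:...\n', 'ACGT...\n', ...]}
### which lets parseChrFASTA() iterate over it exactly like an open file handle.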
def parseChrFASTA(filename,chr,chr_gene_locations,location_gene_db):
""" This function parses FASTA formatted chromosomal sequence, storing only sequence needed to get the next gene plus any buffer seqence, in memory.
Note: not straight forward to follow initially, as some tricks are needed to get the gene plus buffer sequence at the ends of the chromosome"""
genes_to_be_examined={}
for (ch,cs,ce) in location_gene_db:
gene,strand = location_gene_db[ch,cs,ce]
if ch==chr:
genes_to_be_examined[gene]=[ch,cs,ce]
from build_scripts import EnsemblImport
if ('/' in filename) or ('\\' in filename): ### Occurs when parsing a toplevel chromosome sequence file
print "Parsing chromosome %s sequence..." % chr
fn=filepath(filename)
sequence_data = open(fn,'rU').xreadlines()
readtype = 'file'
else:
"""The scaffold architecture of some species sequence files creates a challenge to integrate non-chromosomal sequence,
however, we still want to analyze it in the same way as chromosomal sequence. One alternative is to write out each scaffold
it's own fasta file. The problem with this is that it creates dozens of thousands of files which is time consuming and messy
- see divideUpTopLevelChrFASTA(). To resolve this, we instead store the sequence in a dictionary as sequence lines, but read
in the sequence lines just like they are directly read from the fasta sequence, allowing both pipelines to work the same
(rather than maintain two complex functions that do the same thing)."""
scaffold_line_db = filename
sequence_data = scaffold_line_db
#print len(scaffold_line_db)
readtype = 'dictionary'
chr_gene_locations.sort()
cs=chr_gene_locations[0][0]; ce=chr_gene_locations[0][1]; buffer=2000; failed=0; gene_count=0; terminate_chr_analysis='no'
max_ce = chr_gene_locations[-1][1]; genes_analyzed={}
gene,strand = location_gene_db[chr,cs,ce]
x=0; sequence=''; running_seq_length=0; tr=0 ### tr is the total number of nucleotides removed (only store sequence following the last gene)
for line in sequence_data:
data = cleanUpLine(line)
if x==0:
#>MT dna:chromosome chromosome:GRCh37:MT:1:16569:1
max_chr_length = int(string.split(data,':')[-2])-70
x=1
else:
            iterate = 'yes' ### Since the buffer region can itself contain multiple genes, we may need to iterate through the last stored sequence set for multiple genes
sequence += data; running_seq_length+=len(data)
if (ce+buffer)>=max_chr_length: target_seq_length = max_chr_length
else: target_seq_length = ce+buffer
if running_seq_length>=target_seq_length:
while iterate == 'yes':
internal_buffer = buffer
adj_start = cs-tr-buffer-1; adj_end = ce-tr+buffer; original_adj_start=adj_start
if adj_start<0: original_adj_start = abs(adj_start); internal_buffer=buffer-(adj_start*-1); adj_start=0
try:
gene_seq = sequence[adj_start:adj_end]
### Add "N" to the sequence, before and after, if the gene starts to close the chromosome end for the buffer
if adj_start==0: gene_seq=original_adj_start*'N'+gene_seq
elif len(gene_seq) != (adj_end-adj_start): gene_seq+=((adj_end-adj_start)-len(gene_seq))*'N'
start=str(cs-buffer); end=str(ce+buffer)
### write this out
if strand=='-': gene_seq=EnsemblImport.reverse_orientation(gene_seq)
header = string.join(['>'+gene,chr,str(cs),str(ce)],'|') #['>'+gene,chr,start,end]
#print header, cs, ce, strand,adj_start,adj_end,len(sequence),len(gene_seq)
#print x,[gene_seq]
                        if gene not in genes_analyzed: ### Indicates something is iterating where it shouldn't be, but doesn't seem to create a significant data handling issue
dna.write(header+'\n'); dna.write(gene_seq+'\n'); x+=1
genes_analyzed[gene]=[]
except KeyError: failed+=1; kill ### gene is too close to chromosome end to add buffer
seq_start = adj_start; tr = cs-internal_buffer-1; sequence=sequence[seq_start:]; gene_count+=1
try:
cs=chr_gene_locations[gene_count][0]; ce=chr_gene_locations[gene_count][1]
gene,strand = location_gene_db[chr,cs,ce]
except IndexError: terminate_chr_analysis = 'yes'; iterate='no'; break ### no more genes to examine on this chromosome
if len(data)<60: iterate='yes' ### Forces re-iteration through the last stored sequence set
else: iterate='no'
if terminate_chr_analysis == 'yes': break
if ('/' in filename) or ('\\' in filename):
print "Sequence for",len(genes_analyzed),"out of",len(genes_to_be_examined),"genes exported..."
"""
for gene in genes_to_be_examined:
if gene not in genes_analyzed:
print gene, genes_to_be_examined[gene]
sys.exit()"""
elif len(genes_analyzed) != len(genes_to_be_examined):
print len(genes_to_be_examined)-len(genes_analyzed), 'not found for',chr
def buildTranscriptStructureTable():
### Function for building Ensembl transcription annotation file
temp_values_list = []; output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
headers = ['Ensembl Gene ID', 'Chromosome', 'Strand', 'Exon Start (bp)', 'Exon End (bp)', 'Ensembl Exon ID', 'Constitutive Exon', 'Ensembl Transcript ID']
for eti in exon_transcript_db:
exonid = eti.ExonId(); transcript_id = eti.TranscriptId(); rank = eti.Rank()
ei = exon_db[exonid]; seq_region_start = ei.SeqRegionStart(); seq_region_end = ei.SeqRegionEnd()
try: constitutive_call = str(ei.IsConstitutive())
except Exception: constitutive_call = '0'
try: ens_exon = exon_db[exonid].StableId()
except Exception: ens_exon = exon_stable_id_db[exonid].StableId()
ti = transcript_db[transcript_id]; geneid = ti.GeneId()
try: ens_transcript = transcript_db[transcript_id].StableId()
except Exception: ens_transcript = transcript_stable_id_db[transcript_id].StableId()
gi = gene_db[geneid]; seq_region_id = gi.SeqRegionId(); strand = gi.SeqRegionStrand()
chr = seq_region_db[seq_region_id].Name()
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
values = [geneid,transcript_id,rank,ens_gene,chr,str(strand),str(seq_region_start),str(seq_region_end),ens_exon,constitutive_call,ens_transcript]
temp_values_list.append(values)
values_list=[]; temp_values_list.sort() ###Make sure the gene, transcripts and exons are grouped and ranked appropriately
for values in temp_values_list: values_list.append(values[3:])
temp_values_list=[]
exportEnsemblTable(values_list,headers,output_dir)
def exportTranscriptBioType():
### Function for extracting biotype annotations for transcripts
transcript_protein_id={}
for protein_id in translation_db:
ti = translation_db[protein_id]; transcript_id = ti.TranscriptId()
transcript_protein_id[transcript_id] = protein_id
values_list = []; output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt'
headers = ['Ensembl Gene ID','Ensembl Translation ID', 'Biotype']
for transcript_id in transcript_db:
ti = transcript_db[transcript_id]
try: ens_transcript = transcript_db[transcript_id].StableId()
except Exception: ens_transcript = transcript_stable_id_db[transcript_id].StableId()
try:
try: protein_id = transcript_protein_id[transcript_id]; ens_protein = translation_db[protein_id].StableId()
except Exception: protein_id = transcript_protein_id[transcript_id]; ens_protein = translation_stable_id_db[protein_id].StableId()
except Exception: ens_protein = ens_transcript+'-PEP'
        geneid = ti.GeneId()
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
values = [ens_gene,ens_protein,ti.Biotype()]
values_list.append(values)
exportEnsemblTable(values_list,headers,output_dir)
def aaselect(nt_length):
    ### Convert a nucleotide length to a protein length (the ceiling of nt_length/3); e.g., aaselect(10) returns 4
    nt_length = int(nt_length)
    if nt_length%3 == 0:
        return nt_length/3
    else:
        return nt_length/3 + 1
def getDomainGenomicCoordinates(species,xref_db):
### Get all transcripts relative to genes to export gene, transcript and protein ID relationships
transcript_protein_id={}
for protein_id in translation_db:
ti = translation_db[protein_id]; transcript_id = ti.TranscriptId()
transcript_protein_id[transcript_id] = protein_id
output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_Protein_'+ensembl_build+'.tab'
headers = ['Gene', 'Trans', 'Protein']; values_list=[]
for transcript_id in transcript_db:
geneid = transcript_db[transcript_id].GeneId()
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
try:
try: protein_id = transcript_protein_id[transcript_id]; ens_protein = translation_db[protein_id].StableId()
except Exception: protein_id = transcript_protein_id[transcript_id]; ens_protein = translation_stable_id_db[protein_id].StableId()
except KeyError: ens_protein = ''
try: ens_transcript = transcript_db[transcript_id].StableId()
except Exception: ens_transcript = transcript_stable_id_db[transcript_id].StableId()
values_list.append([ens_gene,ens_transcript,ens_protein])
exportEnsemblTable(values_list,headers,output_dir)
exon_transcript_db2={} ### exon_transcript_db has data stored as a list, so store as a ranked db
for eti in exon_transcript_db:
transcript_id = eti.TranscriptId(); rank = eti.Rank()
try: exon_transcript_db2[transcript_id].append((rank,eti))
except KeyError: exon_transcript_db2[transcript_id] = [(rank,eti)]
interpro_annotation_db={}
for xref_id in xref_db:
interprot_id = xref_db[xref_id].DbprimaryAcc(); display_label = xref_db[xref_id].DisplayLabel()
interpro_annotation_db[interprot_id] = display_label
### Get the protein coding positions for each exon, to later determine the genomic position of non-InterPro features (way downstream)
    ### This code was adapted from the InterPro-location code further below (this version is simpler)
output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_ProteinCoordinates_build'+ensembl_build+'.tab'
    headers = ['proteinID', 'exonID', 'AA_NT_Start', 'AA_NT_Stop', 'Genomic_Start', 'Genomic_Stop']; values_list=[]
cds_output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_EnsemblTranscriptCDSPositions.tab'
cds_headers = ['transcriptID', 'CDS_Start', 'CDS_Stop']; cds_values_list=[]
protein_coding_exon_db={}; protein_length_db={}
### Get the bp position (relative to the exon not genomic) for each transcript, where protein translation begins and ends. Store with the exon data.
for protein_id in translation_db:
ti = translation_db[protein_id]; transcript_id = ti.TranscriptId()
seq_start = ti.SeqStart(); start_exon_id = ti.StartExonId(); seq_end = ti.SeqEnd(); end_exon_id = ti.EndExonId()
eti_list = exon_transcript_db2[transcript_id]; eti_list.sort()
### Get info for exporting exon protein coordinate data
try: ens_protein = translation_db[protein_id].StableId()
except Exception: ens_protein = translation_stable_id_db[protein_id].StableId()
try: ens_transcript = transcript_db[transcript_id].StableId()
except Exception: ens_transcript = transcript_stable_id_db[transcript_id].StableId()
#if ens_protein == 'ENSDARP00000087122':
cumulative_exon_length = 0
tis = transcript_db[transcript_id]; geneid = tis.GeneId(); gi = gene_db[geneid]; strand = gi.SeqRegionStrand() #Get strand
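        ### Strand-specific offsets: on the reverse strand, positions are shifted (-3/+2) so that reported coordinates align to codon boundaries (see the notes on the genomic start calculations below)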
        if strand == -1: start_correction = -3; end_correction = 2
else: start_correction = 0; end_correction = 0
        translation_pos1=0; translation_pos2=0 ### Amino acid positions in nt space, beginning and ending in the exon
coding_exons = []; ce=0
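        ### Walk the exons in rank order and handle three cases: the exon where translation starts, internal coding exons, and the exon where translation ends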
for (rank,eti) in eti_list:
exonid = eti.ExonId()
try: ens_exon = exon_db[exonid].StableId()
except Exception: ens_exon = exon_stable_id_db[exonid].StableId()
ei = exon_db[exonid]; genomic_exon_start = ei.SeqRegionStart(); genomic_exon_end = ei.SeqRegionEnd()
exon_length = abs(genomic_exon_end-genomic_exon_start)+1
#print exonid,start_exon_id,end_exon_id
if exonid == start_exon_id: ### Thus we are in the start exon for this transcript
coding_bp_in_exon = exon_length - seq_start; eti.setCodingBpInExon(coding_bp_in_exon) ### -1 since coding can't start at 0, but rather 1 at a minimum
#print 'start',genomic_exon_start,genomic_exon_end,exon_length,seq_start;kill
coding_exons.append(eti); ce+=1; translation_pos2=coding_bp_in_exon+1
if strand == -1:
genomic_translation_start = ei.SeqRegionStart()+coding_bp_in_exon+start_correction
genomic_exon_end = ei.SeqRegionStart()+end_correction
else:
genomic_translation_start = ei.SeqRegionEnd()-coding_bp_in_exon
genomic_exon_end = ei.SeqRegionEnd()
#print 'trans1:',float(translation_pos1)/3, float(translation_pos2)/3
values_list.append([ens_protein,ens_exon,1,aaselect(translation_pos2),genomic_translation_start,genomic_exon_end])
cds_start = seq_start+cumulative_exon_length ### start position in this exon plus last exon cumulative length
elif exonid == end_exon_id: ### Thus we are in the end exon for this transcript
coding_bp_in_exon = seq_end; eti.setCodingBpInExon(coding_bp_in_exon)
coding_exons.append(eti); ce = 0
translation_pos1=translation_pos2+1
translation_pos2=translation_pos1+coding_bp_in_exon-4
if strand == -1:
genomic_translation_start = ei.SeqRegionEnd()+end_correction+start_correction
genomic_exon_end = ei.SeqRegionEnd()-coding_bp_in_exon+end_correction
else:
genomic_translation_start = ei.SeqRegionStart()
genomic_exon_end = ei.SeqRegionStart()+coding_bp_in_exon
#print 'trans1:',float(translation_pos1)/3, float(translation_pos2)/3
values_list.append([ens_protein,ens_exon,aaselect(translation_pos1),aaselect(translation_pos2),genomic_translation_start,genomic_exon_end])
                cds_end = seq_end+cumulative_exon_length ### end position in this exon plus the cumulative length of the preceding exons
cds_values_list.append([ens_transcript,cds_start,cds_end])
#ens_exon = exon_stable_id_db[exonid].StableId()
elif ce != 0: ###If we are in coding exons
eti.setCodingBpInExon(exon_length)
coding_exons.append(eti)
translation_pos1=translation_pos2+1 ### 1 nucleotide difference from the last exon position
translation_pos2=translation_pos1+exon_length-1
if strand == -1:
genomic_translation_start = ei.SeqRegionEnd()+start_correction
genomic_exon_end = ei.SeqRegionStart()+end_correction
else:
genomic_translation_start = ei.SeqRegionStart()
genomic_exon_end = ei.SeqRegionEnd()
#print 'trans1:',float(translation_pos1)/3,float(translation_pos2)/3
values_list.append([ens_protein,ens_exon,aaselect(translation_pos1),aaselect(translation_pos2),genomic_translation_start,genomic_exon_end])
cumulative_exon_length += exon_length
#ti = transcript_db[transcript_id]; geneid = ti.GeneId(); ens_gene = gene_stable_id_db[geneid].StableId()
#ens_exon = exon_stable_id_db[exonid].StableId()
#if ens_gene == 'ENSG00000106785':
#if ens_exon == 'ENSE00001381077':
#print exon_length, seq_start, coding_bp_in_exon;kill
#print 'start',ens_gene,rank,len(eti_list),exonid,ens_exon,start_exon_id,end_exon_id,genomic_exon_start,genomic_exon_end,exon_length,seq_start,coding_bp_in_exon,seq_end
protein_coding_exon_db[protein_id] = coding_exons
print 'Exporting protein-to-exon genomic position translations',len(values_list)
exportEnsemblTable(values_list,headers,output_dir)
exportEnsemblTable(cds_values_list,cds_headers,cds_output_dir)
#sys.exit()
### Using the exon coding positions, determine InterPro coding genomic locations
output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_ProteinFeatures_build'+ensembl_build+'.tab'
headers = ['ID', 'AA_Start', 'AA_Stop', 'Start', 'Stop', 'Name', 'Interpro', 'Description']
interprot_match=0; values_list=[]
#print len(protein_feature_db),len(translation_db),len(interpro_db),len(interpro_annotation_db);sys.exit()
for protein_feature_id in protein_feature_db:
pfi = protein_feature_db[protein_feature_id]; protein_id = pfi.TranslationId(); evalue = pfi.Evalue()
try: ens_protein = translation_db[protein_id].StableId()
except Exception: ens_protein = translation_stable_id_db[protein_id].StableId()
seq_start = pfi.SeqStart(); seq_end = pfi.SeqEnd(); hit_id = pfi.HitId() ### hit_id is the domain accession which maps to 'id' in interpro
seq_start = seq_start*3-2; seq_end = seq_end*3 ###convert to transcript basepair positions
coding_exons = protein_coding_exon_db[protein_id]
cumulative_coding_length = 0; last_exon_cumulative_coding_length = 1
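        ### cumulative_coding_length counts coding bp through the current exon; it is used to map the domain's transcript (bp) coordinates onto individual exons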
ti = translation_db[protein_id]; transcript_id = ti.TranscriptId() ### Get transcript ID, to look up strand
ti = transcript_db[transcript_id]; geneid = ti.GeneId(); gi = gene_db[geneid]; strand = gi.SeqRegionStrand() #Get strand
        if strand == -1: start_correction = -3; end_correction = 2
else: start_correction = 0; end_correction = 0
if hit_id in interpro_db and evalue<1: ### Only analyze domain-level features that correspond to known protein feature IDs
interpro_ac = interpro_db[hit_id].InterproAc(); interprot_match+=1
if interpro_ac in interpro_annotation_db:
interpro_name = interpro_annotation_db[interpro_ac] ###Built this annotation database using the xref database
#print interpro_name,ens_protein,seq_start,seq_end
genomic_domain_start = 0; genomic_domain_end = 0; domain_in_first_exon = 'no'; non_coding_seq_len = 0
for eti in coding_exons:
domain_in_first_exon = 'no'
coding_bp_in_exon = eti.CodingBpInExon(); cumulative_coding_length += coding_bp_in_exon
if seq_start <= cumulative_coding_length and seq_start >= last_exon_cumulative_coding_length: ### Thus, domain starts in this exon
var = 'start'
exonid = eti.ExonId(); ei = exon_db[exonid]
if abs(ei.SeqRegionEnd()-ei.SeqRegionStart()+1) != coding_bp_in_exon:
### Can occur in the first exon
if last_exon_cumulative_coding_length<2:
domain_in_first_exon = 'yes'
non_coding_seq_len = abs(ei.SeqRegionEnd()-ei.SeqRegionStart()+1) - coding_bp_in_exon
genomic_bp_exon_offset = seq_start - last_exon_cumulative_coding_length + non_coding_seq_len
else:
genomic_bp_exon_offset = seq_start - last_exon_cumulative_coding_length
else: genomic_bp_exon_offset = seq_start - last_exon_cumulative_coding_length
if strand == -1:
genomic_exon_start = ei.SeqRegionEnd() ### This needs to be reversed if reverse strand
genomic_domain_start = genomic_exon_start-genomic_bp_exon_offset+start_correction ### This is what we want! (minus 3 so that we start at the first bp of that codon, not the first of the next (don't count the starting coding as 3bp)
else:
genomic_exon_start = ei.SeqRegionStart()
genomic_domain_start = genomic_bp_exon_offset+genomic_exon_start+start_correction ### This is what we want! (minus 3 so that we start at the first bp of that codon, not the first of the next (don't count the starting coding as 3bp)
#print genomic_exon_start,last_exon_cumulative_coding_length,genomic_domain_start,genomic_bp_exon_offset;kill
#pfi.setGenomicStart(genomic_domain_start)
if seq_end <= cumulative_coding_length and seq_end >= last_exon_cumulative_coding_length: ### Thus, domain ends in this exon
var = 'end'
exonid = eti.ExonId(); ei = exon_db[exonid]
genomic_bp_exon_offset = seq_end - last_exon_cumulative_coding_length
if (abs(ei.SeqRegionEnd()-ei.SeqRegionStart()+1) != coding_bp_in_exon) and domain_in_first_exon == 'yes': genomic_bp_exon_offset += non_coding_seq_len ### If the domain starts/ends in the first exon
if strand == -1:
genomic_exon_start = ei.SeqRegionEnd() ### This needs to be reversed if reverse strand
genomic_domain_end = genomic_exon_start-genomic_bp_exon_offset+end_correction ### This is what we want!
else:
genomic_exon_start = ei.SeqRegionStart()
genomic_domain_end = genomic_bp_exon_offset+genomic_exon_start+end_correction ### This is what we want!
#pfi.setGenomicEnd(genomic_domain_end)
#"""
#ens_exon = exon_stable_id_db[eti.ExonId()].StableId()
#if cumulative_coding_length == seq_end and strand == -1 and seq_start == 1 and domain_in_first_exon == 'yes':
#print interpro_name,protein_id,eti.ExonId(),ens_protein,ens_exon,seq_end,genomic_domain_start,genomic_domain_end;kill
                    ### Debug output for a specific protein, disabled (development leftover):
                    #if ens_protein == 'ENSMUSP00000097740':
                    #    print interpro_name, genomic_domain_start, genomic_domain_end, last_exon_cumulative_coding_length, seq_end, seq_start, non_coding_seq_len
                    #    #print 'coding_bp_in_exon, cumulative_coding_length, genomic_bp_exon_offset',exon_db[exonid].StableId(), coding_bp_in_exon, cumulative_coding_length, genomic_bp_exon_offset
                    #    if var == 'start':
                    #        print interpro_name, var,genomic_exon_start,genomic_bp_exon_offset,start_correction, ei.SeqRegionStart(), ei.SeqRegionEnd()
                    #    else:
                    #        print interpro_name, var,genomic_exon_start,genomic_bp_exon_offset,end_correction, ei.SeqRegionStart(), ei.SeqRegionEnd()
#print 'exon',ens_exon,eti.ExonId(),ei.SeqRegionStart(), ei.SeqRegionEnd()#"""
#print non_coding_seq_len, domain_in_first_exon, coding_bp_in_exon, genomic_exon_start, genomic_domain_start, genomic_bp_exon_offset, start_correction
#print seq_start, interpro_name,seq_end, cumulative_coding_length,last_exon_cumulative_coding_length, genomic_domain_start, genomic_domain_end, ei.SeqRegionStart(), ei.SeqRegionEnd()
last_exon_cumulative_coding_length = cumulative_coding_length + 1
if genomic_domain_start !=0 and genomic_domain_end !=0:
values = [ens_protein,(seq_start/3)+1,seq_end/3,genomic_domain_start,genomic_domain_end,hit_id,interpro_ac,interpro_name]
values_list.append(values)
print 'interprot_matches:',interprot_match
exportEnsemblTable(values_list,headers,output_dir)
def buildFilterDBForExternalDB(externalDBName):
### Generic function for pulling out any specific type of Xref without storing the whole database in memory
    global external_filter_db; global key_filter_db
    external_filter_db={}; key_filter_db={} ### Reset the global key_filter_db, otherwise this would cause importPrimaryEnsemblSQLTables to import the wrong entries
for external_db_id in external_db_db:
db_name = external_db_db[external_db_id].DbName()
if db_name == externalDBName: external_filter_db[external_db_id]=[]
def buildFilterDBForArrayDB(externalDBName):
### Generic function for pulling out any specific type of Xref without storing the whole database in memory
    global external_filter_db; global key_filter_db
    external_filter_db={}; key_filter_db={} ### Reset the global key_filter_db, otherwise this would cause importPrimaryEnsemblSQLTables to import the wrong entries
for array_chip_id in array_chip_db:
array_id = array_chip_db[array_chip_id].ArrayID()
try:
name = array_db[array_id].Name()
vendor = array_db[array_id].Vendor()
format = array_db[array_id].Format()
if name == externalDBName and (format != 'TILED' and '\\N' not in vendor):
external_filter_db[array_chip_id]=[vendor]
except KeyError: null=[]
def resetExternalFilterDB():
global external_filter_db; external_filter_db={}
def buildEnsemblGeneAnnotationTable(species,xref_db):
### Get Definitions and Symbol for each Ensembl gene ID
values_list = []; output_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
headers = ['Ensembl Gene ID','Description','Gene name']
for geneid in gene_db:
gi = gene_db[geneid]; display_xref_id = gi.DisplayXrefId(); description = gi.Description()
if description == '\\N': description = ''
try: symbol = xref_db[display_xref_id].DisplayLabel()
except KeyError: symbol = ''
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
values = [ens_gene,description,symbol]
values_list.append(values)
###Also, create a filter_db that allows for the creation of Ensembl-external gene db tables
exportEnsemblTable(values_list,headers,output_dir)
def buildEnsemblExternalDBRelationshipTable(external_system,xref_db,object_xref_db,output_id_type,species,writeToGOEliteFolder=False):
### Get xref annotations (e.g., external geneID) for a set of Ensembl IDs (e.g. gene IDs)
external_system = string.replace(external_system,'/','-') ###Some external relationship systems have a slash in them which will create unwanted directories
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze' and writeToGOEliteFolder == False:
output_dir = 'AltDatabase/'+external_system+'/'+species+'/'+species+'_Ensembl-'+external_system+'.txt'
else:
if program_type == 'AltAnalyze':
parent_dir = 'AltDatabase/goelite' ### Build directly with the AltAnalyze database
elif 'over-write previous' in overwrite_previous: parent_dir = 'Databases'
else: parent_dir = 'NewDatabases'
if external_system == 'GO':
output_dir = parent_dir+'/'+species+'/gene-go/Ensembl-GeneOntology.txt'
elif 'meta' in external_system:
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl_EntrezGene-meta.txt'
elif external_system in system_synonym_db:
system_name = system_synonym_db[external_system]
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-'+system_name+'.txt'
elif 'Uniprot' in external_system: ### Needed for AltAnalyze
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-Uniprot.txt'
else:
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-'+external_system+'.txt'
version_info = species+' Ensembl relationships downloaded from EnsemblSQL server, build '+ensembl_build
try: exportVersionInfo(output_dir,version_info)
except Exception: null=[]
    headers = ['Ensembl ID',external_system + ' ID']; index=0
id_type_db={}; gene_relationship_db={}; transcript_relationship_db={}; translation_relationship_db={}
for xref_id in object_xref_db:
for ox in object_xref_db[xref_id]:
ens_numeric_id = ox.EnsemblId()
try:
index+=1
try: dbprimary_acc = xref_db[xref_id].DbprimaryAcc()
except Exception: print external_system, xref_id, xref_db[xref_id], len(xref_db);sys.exit()
external_db_id = xref_db[xref_id].ExternalDbId()
ens_object_type = ox.EnsemblObjectType()
#print len(xref_db),len(object_xref_db),len(external_filter_db),xref_id,dbprimary_acc,external_db_id,ens_object_type;kill
#if '13065181' == external_db_id or '13065181' == xref_id or '13065181' == dbprimary_acc: print 'blah';kill
if external_db_id in external_filter_db: ###Make sure other gene systems are not included
### For testing determine the most likely system linked to by the xref (e.g. gene, transcript or translation ID).
try: id_type_db[ox.EnsemblObjectType()]+=1
except KeyError: id_type_db[ox.EnsemblObjectType()]=1
if ens_object_type == 'Gene': gene_relationship_db[ens_numeric_id,dbprimary_acc]=[]
if ens_object_type == 'Transcript': transcript_relationship_db[ens_numeric_id,dbprimary_acc]=[]
if ens_object_type == 'Translation': translation_relationship_db[ens_numeric_id,dbprimary_acc]=[]
except KeyError: null=[]
ids=['ID types linked to '+external_system+' are: ']
for id_type in id_type_db: ### Tells us which ID types are most connected to a given external reference ID (don't often know)
ids+=[str(id_type),': ',str(id_type_db[id_type]),'\t']
ids = string.join(ids,''); print ids
values_list = convertBetweenEnsemblIDTypes(output_id_type,transcript_relationship_db,translation_relationship_db,gene_relationship_db)
if 'meta' in external_system:
values_list2=[]
for values in values_list: values_list2.append([values[1],values[0]]); values_list2.append(values)
values_list = values_list2
if len(values_list)>0:
exportEnsemblTable(values_list,headers,output_dir)
added_systems[external_system]=[]
return len(values_list)
def exportVersionInfo(dir,version_info):
dirs = string.split(dir,'/')
dir = string.join(dirs[:-1],'/') ### Remove the filename
data = export.ExportFile(dir+'/Ensembl_version.txt')
data.write(version_info+'\n')
def convertBetweenEnsemblIDTypes(output_id_type,transcript_relationship_db,translation_relationship_db,gene_relationship_db):
### Starting with gene, transcript or protein relationships to an xref, convert to a single bio-type
values_list=[]
### Get all proteins relative to transcripts
transcript_to_protein_db = {}
for protein_id in translation_db:
transcript_id = translation_db[protein_id].TranscriptId()
transcript_to_protein_db[transcript_id] = protein_id
### Get all transcripts relative to genes
gene_to_transcript_db={}
for transcript_id in transcript_db:
geneid = transcript_db[transcript_id].GeneId()
try: gene_to_transcript_db[geneid].append(transcript_id)
except KeyError: gene_to_transcript_db[geneid] = [transcript_id]
for (ens_numeric_id,dbprimary_acc) in transcript_relationship_db:
if output_id_type == 'Gene':
geneid = transcript_db[ens_numeric_id].GeneId();
try:
try: ens_id = gene_db[geneid].StableId()
except Exception: ens_id = gene_stable_id_db[geneid].StableId()
except KeyError: null = [] ### Again, this occurs in version 47
elif output_id_type == 'Transcription':
try: ens_id = transcript_db[ens_numeric_id].StableId()
except Exception: ens_id = transcript_stable_id_db[ens_numeric_id].StableId()
elif output_id_type == 'Translation':
try:
try: protein_id = transcript_to_protein_db[ens_numeric_id]; ens_id = translation_db[protein_id].StableId()
except Exception: protein_id = transcript_to_protein_db[ens_numeric_id]; ens_id = translation_stable_id_db[protein_id].StableId()
except KeyError: null = []
try: values = [ens_id,dbprimary_acc]; values_list.append(values)
except NameError: null = []
for (ens_numeric_id,dbprimary_acc) in translation_relationship_db:
if output_id_type == 'Gene':
transcript_id = translation_db[ens_numeric_id].TranscriptId()
geneid = transcript_db[transcript_id].GeneId()
try:
try: ens_id = gene_db[geneid].StableId()
except Exception: ens_id = gene_stable_id_db[geneid].StableId()
except KeyError: null = [] ### Again, this occurs in version 47
elif output_id_type == 'Transcription':
transcript_id = translation_db[ens_numeric_id].TranscriptId()
try: ens_id = transcript_db[transcript_id].StableId()
except Exception: ens_id = transcript_stable_id_db[transcript_id].StableId()
elif output_id_type == 'Translation': ens_id = translation_stable_id_db[ens_numeric_id].StableId()
try: values = [ens_id,dbprimary_acc]; values_list.append(values)
except NameError: null = []
for (ens_numeric_id,dbprimary_acc) in gene_relationship_db:
if output_id_type == 'Gene':
try: ens_id = gene_db[ens_numeric_id].StableId()
except Exception: ens_id = gene_stable_id_db[ens_numeric_id].StableId()
values = [ens_id,dbprimary_acc]; values_list.append(values)
elif output_id_type == 'Transcription':
transcript_ids = gene_to_transcript_db[ens_numeric_id]
for transcript_id in transcript_ids:
try: ens_id = transcript_db[transcript_id].StableId()
except Exception: ens_id = transcript_stable_id_db[transcript_id].StableId()
values = [ens_id,dbprimary_acc]; values_list.append(values)
elif output_id_type == 'Translation':
transcript_ids = gene_to_transcript_db[ens_numeric_id]
for transcript_id in transcript_ids:
                    try: ### Translate transcript IDs to protein IDs
                        protein_id = transcript_to_protein_db[transcript_id]
try: ens_id = translation_db[protein_id].StableId()
except Exception: ens_id = translation_stable_id_db[protein_id].StableId()
values = [ens_id,dbprimary_acc]; values_list.append(values)
except KeyError: null = []
values_list = unique.unique(values_list)
return values_list
def exportListstoFiles(values_list,headers,output_dir,rewrite):
global rewrite_existing; rewrite_existing = rewrite
exportEnsemblTable(values_list,headers,output_dir)
def exportEnsemblTable(values_list,headers,output_dir):
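    ### Write values_list as a tab-delimited table with the supplied headers; when rewrite_existing == 'no', merge with any previously exported copy first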
if rewrite_existing == 'no':
print 'Appending new data to',output_dir
try:values_list = combineEnsemblTables(values_list,output_dir) ###Combine with previous
except Exception: null=[]
data = export.ExportFile(output_dir)
if len(headers)>0:
headers = string.join(headers,'\t')+'\n'
data.write(headers)
for values in values_list:
try: values = string.join(values,'\t')+'\n'
except TypeError:
values_temp = values; values = []
for value in values_temp: values.append(str(value))
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
print 'File:',output_dir,'exported.'
def combineEnsemblTables(values_list,filename):
fn=filepath(filename); x=0
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
values_list.append(t)
values_list = unique.unique(values_list)
return values_list
class EnsemblSQLInfo:
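    ### Describes one SQL table from Config/EnsemblSQL.txt: filename, URL type, import group, key field and '|'-delimited value fields to parse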
def __init__(self, filename, url_type, importGroup, key, values, comments):
self._filename = filename; self._url_type = url_type; self._importGroup = importGroup
self._key = key; self._values = values; self._comments = comments
def Filename(self): return self._filename
def URLType(self): return self._url_type
def ImportGroup(self): return self._importGroup
def Key(self): return self._key
def Values(self):
value_list = string.split(self._values,'|')
return value_list
def FieldNames(self): ### These are fields in the description file to parse from downloaded files
field_names = self.Values()
field_names.append(self.Key())
return field_names
def setIndexDB(self,index_db): self._index_db = index_db
def IndexDB(self): return self._index_db
def Comments(self): return self._comments
    def Report(self): return self.Key()+'|'+self._values
def __repr__(self): return self.Report()
class EnsemblSQLEntryData:
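    ### Generic row object: setSQLValue maps each recognized SQL column header onto a named attribute (one attribute per Ensembl field)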
def setSQLValue(self,header,value):
if header == "seq_region_start": self.seq_region_start = value
elif header == "transcript_id": self.transcript_id = value
elif header == "stable_id": self.stable_id = value
elif header == "gene_id": self.gene_id = value
elif header == "biotype": self.biotype = value
elif header == "translation_id": self.translation_id = value
elif header == "id": self.id = value
elif header == "synonym": self.synonym = value
elif header == "external_db_id": self.external_db_id = value
elif header == "object_xref_id": self.object_xref_id = value
elif header == "db_name": self.db_name = value
elif header == "seq_end": self.seq_end = value
elif header == "end_exon_id": self.end_exon_id = value
elif header == "description": self.description = value
elif header == "hit_id": self.hit_id = value
elif header == "hit_name": self.hit_id = value
elif header == "ensembl_object_type": self.ensembl_object_type = value
elif header == "start_exon_id": self.start_exon_id = value
elif header == "seq_region_end": self.seq_region_end = value
elif header == "dbprimary_acc": self.dbprimary_acc = value
elif header == "seq_start": self.seq_start = value
elif header == "display_xref_id": self.display_xref_id = value
elif header == "display_label": self.display_label = value
elif header == "ensembl_id": self.ensembl_id = value
elif header == "seq_region_strand": self.seq_region_strand = value
elif header == "rank": self.rank = value
elif header == "seq_region_id": self.seq_region_id = value
elif header == "name": self.name = value
elif header == "exon_id": self.exon_id = value
elif header == "is_constitutive": self.is_constitutive = value
elif header == "interpro_ac": self.interpro_ac = value
elif header == "xref_id": self.xref_id = value
elif header == "evalue":
try: self.evalue = float(value)
except Exception: self.evalue = 0 ### For yeast, can be NA (likely a problem with Ensembl)
elif header == "vendor": self.vendor = value
elif header == "array_id": self.array_id = value
elif header == "array_chip_id": self.array_chip_id = value
elif header == "probe_set_id": self.probe_set_id = value
elif header == "format": self.format = value
else: ###Shouldn't occur, unless we didn't account for an object type
print 'Warning!!! An object type has been imported which does not exist in this class'
print 'Object type =',header;sys.exit()
### Create objects specified in the SQL Description and Configuration file
def SeqRegionStart(self): return self.seq_region_start
def TranscriptId(self): return self.transcript_id
def StableId(self): return self.stable_id
def GeneId(self): return self.gene_id
def Biotype(self): return self.biotype
def TranslationId(self): return self.translation_id
def Id(self): return self.id
def Synonym(self): return self.synonym
def ExternalDbId(self): return self.external_db_id
def ObjectXrefId(self): return self.object_xref_id
def DbName(self): return self.db_name
def ExonId(self): return self.exon_id
def IsConstitutive(self): return self.is_constitutive
def SeqEnd(self): return self.seq_end
def EndExonId(self): return self.end_exon_id
def Description(self): return self.description
def HitId(self): return self.hit_id
def EnsemblObjectType(self): return self.ensembl_object_type
def StartExonId(self): return self.start_exon_id
def SeqRegionEnd(self): return self.seq_region_end
def DbprimaryAcc(self): return self.dbprimary_acc
def SeqStart(self): return self.seq_start
def DisplayXrefId(self): return self.display_xref_id
def DisplayLabel(self): return self.display_label
def EnsemblId(self): return self.ensembl_id
def SeqRegionStrand(self): return self.seq_region_strand
def Rank(self): return self.rank
def Name(self): return self.name
def SeqRegionId(self): return self.seq_region_id
### Create new objects designated by downstream custom code
def setCodingBpInExon(self,coding_bp_in_exon): self.coding_bp_in_exon = coding_bp_in_exon
def CodingBpInExon(self): return self.coding_bp_in_exon
def setGenomicStart(self,genomic_start): self.genomic_start = genomic_start
def GenomicStart(self): return self.genomic_start
def setGenomicEnd(self,genomic_end): self.genomic_end = genomic_end
def GenomicEnd(self): return self.genomic_end
def InterproAc(self): return self.interpro_ac
def XrefId(self): return self.xref_id
def Evalue(self): return self.evalue
def Vendor(self): return self.vendor
def ArrayID(self): return self.array_id
def ArrayChipID(self): return self.array_chip_id
def ProbeSetID(self): return self.probe_set_id
def Format(self): return self.format
def setProbeSetID(self,probe_set_id): self.probe_set_id = probe_set_id
def importEnsemblSQLInfo(configType):
filename = 'Config/EnsemblSQL.txt'
fn=filepath(filename); sql_file_db={}; sql_group_db={}; x=0
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
filename, url_type, importGroup, configGroup, key, values, comments = string.split(data,'\t')
if x==0: x=1
else:
sq = EnsemblSQLInfo(filename, url_type, importGroup, key, values, comments)
sql_file_db[importGroup,filename] = sq
### To conserve memory, only import necessary files for each run (results in 1/5th the memory usage)
if configType == 'Basic' and configGroup == 'Basic': proceed = 'yes'
elif configType == 'Basic': proceed = 'no'
else: proceed = 'yes'
if proceed == 'yes':
try: sql_group_db[importGroup].append(filename)
except KeyError: sql_group_db[importGroup] = [filename]
return sql_file_db,sql_group_db
def importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_description_dir,sql_group_db,sql_file_db,output_dir,import_group,force):
if 'Primary' in import_group:
global exon_db; global transcript_db; global translation_stable_id_db
global exon_transcript_db; global transcript_stable_id_db; global gene_db
global exon_stable_id_db; global translation_db; global gene_stable_id_db
global protein_feature_db; global interpro_db; global external_synonym_db
global external_db_db; global key_filter_db; global external_filter_db
global seq_region_db; global array_db; global array_chip_db
global probe_set_db; global probe_db; global probe_feature_db
key_filter_db={}; external_filter_db={}; probe_set_db={}
    ###Generic function for importing all EnsemblSQL tables based on the SQLDescription table
for filename in sql_group_db['Description']: ### Gets the SQL description table
try: sql_filepaths = updateFiles(ensembl_sql_description_dir,output_dir,filename,force)
except Exception: ### Try again... can be a server problem
sql_filepaths = updateFiles(ensembl_sql_description_dir,output_dir,filename,force)
#print ensembl_sql_description_dir,output_dir,filename; print e; sys.exit()
for sql_filepath in sql_filepaths:
sql_file_db = importSQLDescriptions(import_group,sql_filepath,sql_file_db)
for filename in sql_group_db[import_group]:
sql_filepaths = updateFiles(ensembl_sql_dir,output_dir,filename,force)
if sql_filepaths == 'stable-combined-version':
### Hence, the file was not downloaded
print filename, 'not present in this version of Ensembl' ### Stable files are merged with the indexed files in Ensembl 65 and later
else:
#if 'object' in filename and 'Func' in output_dir: sql_filepaths = ['BuildDBs/EnsemblSQL/Dr/FuncGen/object_xref.001.txt','BuildDBs/EnsemblSQL/Dr/FuncGen/object_xref.002.txt']
            for sql_filepath in sql_filepaths: ### If multiple files exist for a single table, run all files (retain the original filename)
print 'Processing:',filename
try:
try: key_value_db = importPrimaryEnsemblSQLTables(sql_filepath,filename,sql_file_db[import_group,filename])
except Exception:
if key_value_db == 'stable-combined-version':
                            sql_filepaths = 'stable-combined-version' ### This is a new issue in version 2.1.1 - temporary fix
continue
"""except IOError:
sql_filepaths = updateFiles(ensembl_sql_dir,output_dir,filename,'yes')
key_value_db = importPrimaryEnsemblSQLTables(sql_filepath,filename,sql_file_db[import_group,filename])"""
if filename == "exon.txt": exon_db = key_value_db
elif filename == "exon_transcript.txt": exon_transcript_db = key_value_db
elif filename == "exon_stable_id.txt": exon_stable_id_db = key_value_db
elif filename == "transcript.txt": transcript_db = key_value_db
elif filename == "transcript_stable_id.txt": transcript_stable_id_db = key_value_db
elif filename == "translation.txt": translation_db = key_value_db
elif filename == "translation_stable_id.txt": translation_stable_id_db = key_value_db
elif filename == "gene.txt": gene_db = key_value_db
elif filename == "gene_stable_id.txt": gene_stable_id_db = key_value_db
elif filename == "protein_feature.txt": protein_feature_db = key_value_db
elif filename == "interpro.txt": interpro_db = key_value_db
elif filename == "external_synonym.txt": external_synonym_db = key_value_db
elif filename == "external_db.txt": external_db_db = key_value_db
elif filename == "seq_region.txt": seq_region_db = key_value_db
elif filename == "array.txt": array_db = key_value_db
elif filename == "array_chip.txt":
### Add chip type and vendor to external_filter_db to filter probe.txt
array_chip_db = key_value_db; buildFilterDBForArrayDB(externalDBName)
elif filename == "xref.txt":
try: xref_db = combineDBs(xref_db,key_value_db,'string')
except Exception: xref_db = key_value_db
if '.0' in sql_filepath: print 'Entries in xref_db', len(xref_db)
elif filename == "object_xref.txt":
try: object_xref_db = combineDBs(object_xref_db,key_value_db,'list')
except Exception: object_xref_db = key_value_db
if '.0' in sql_filepath: print 'Entries in object_xref_db', len(object_xref_db)
elif filename == "probe.txt":
try: probe_db = combineDBs(probe_db,key_value_db,'list')
except Exception: probe_db = key_value_db
if '.0' in sql_filepath: print 'Entries in probe_db', len(probe_db)
if 'AFFY' in manufacturer and 'ProbeLevel' in analysisType:
for probe_id in probe_db:
for pd in probe_db[probe_id]:
probe_set_db[pd.ProbeSetID()]=[] ### this dictionary is introduced prior to prob_set.txt import when annotating probe genome location
elif filename == "probe_set.txt":
if 'AFFY' in manufacturer and 'ProbeLevel' in analysisType:
for probe_id in probe_db:
for pi in probe_db[probe_id]:
pi.setProbeSetID(key_value_db[pi.ProbeSetID()].Name()) ### Add the probeset annotation name
del probe_set_db ### this object is only temporarily created during probe_db addition - used to restrict to probesets in probe_db
elif 'AFFY' in manufacturer:
probe_set_db = key_value_db
del probe_db
else:
probe_set_db = probe_db
del probe_db
elif filename == "probe_feature.txt":
                        probe_feature_db = key_value_db
else: ###Shouldn't occur, unless we didn't account for a file type
print 'Warning!!! A file has been imported which does not exist in the program space'
print 'Filename =',filename;sys.exit()
except IOError,e:
print e
                    print '...Likely due to SQL tables present from a prior Ensembl version run that are no longer supported for this version. Ignoring and proceeding...'
if sql_filepaths != 'stable-combined-version':
if import_group == 'Primary':
key_filter_db={}
for geneid in gene_db:
gi = gene_db[geneid]; display_xref_id = gi.DisplayXrefId()
key_filter_db[display_xref_id]=[]
elif 'Object-Xref' in import_group:
return object_xref_db
elif 'Xref' in import_group:
return xref_db
elif 'PrimaryFunc' in import_group:
return xref_db
def combineDBs(db1,db2,type):
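    ### Merge two imported SQL tables: with 'string' values the entries simply overwrite (keys should not collide), with 'list' values the lists are concatenated per key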
if type == 'string':
for key in db2: db1[key] = db2[key] ### No common keys ever with string
if type == 'list':
for key in db2:
try: db1[key] = db1[key]+db2[key] ### Occurs when same key exists, but different lists
except KeyError: db1[key] = db2[key]
return db1
def updateFiles(ensembl_sql_dir,output_dir,filename,force):
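    ### Locate a previously downloaded SQL dump for one table, or download it; large tables can be split into numbered parts (e.g., object_xref.001.txt)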
if force == 'no':
file_found = verifyFile(output_dir+filename)
#print file_found,output_dir+filename
if file_found == 'no':
index=1; sql_filepaths = []
while index<10:
filename_new = string.replace(filename,'.txt','.00'+str(index)+'.txt')
file_found = verifyFile(output_dir+filename_new); index+=1
if file_found == 'yes':
sql_filepaths.append(output_dir + filename_new)
if len(sql_filepaths)<1: force = 'yes'
else: sql_filepaths = [output_dir + filename]
if force == 'yes': ### Download the files, rather than re-use existing
ftp_url = ensembl_sql_dir+filename + '.gz'
        try: gz_filepath, status = update.download(ftp_url,output_dir,'')
        except Exception:
            gz_filepath = ''; status = 'Internet connection failed' ### define status when download() raises so the numbered-file retry below is attempted
            if 'stable' in filename:
                sql_filepaths=[]
if 'Internet' in status:
index=1; status = ''; sql_filepaths=[]
while index<10:
                if 'Internet' not in status: ### sometimes, instead of object_xref.txt, the file will be named object_xref.001.txt
ftp_url_new = string.replace(ftp_url,'.txt','.00'+str(index)+'.txt')
gz_filepath, status = update.download(ftp_url_new,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
sql_filepaths.append(gz_filepath[:-3])
index+=1
#print [[[sql_filepaths]]]
else:
sql_filepaths=sql_filepaths[:-1]; print ''
#print [[sql_filepaths]]
break
else:
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
sql_filepaths = [gz_filepath[:-3]]
#print [sql_filepaths]
if len(sql_filepaths)==0:
if 'stable' in filename: sql_filepaths = 'stable-combined-version' ### For Ensembl 65 and later (changed table organization from previous versions)
else:
            print '\nThe file:',filename, 'is missing from the Ensembl FTP directory for this version of Ensembl. Contact [email protected] to inform our developers of this change to the Ensembl database table structure or download the latest version of this software.'; force_exit ### force_exit is deliberately undefined - raising a NameError halts the build here
return sql_filepaths
def importPrimaryEnsemblSQLTables(sql_filepath,filename,sfd):
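    ### Parse one tab-delimited SQL dump using the column indexes from the .sql description file, applying key/external filters to limit memory use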
fn=filepath(sql_filepath)
index=0; key_value_db={}; data_types={}
if len(key_filter_db)>0: key_filter = 'yes'
else: key_filter = 'no'
if len(external_filter_db)>0: external_filter = 'yes'
else: external_filter = 'no'
try:
if len(external_xref_key_db)>0: external_xref_key_filter = 'yes'
else: external_xref_key_filter = 'no'
except NameError: external_xref_key_filter = 'no'
#print filename, key_filter, external_filter, external_xref_key_filter
try: index_db = sfd.IndexDB(); key_name = sfd.Key(); entries=0
except Exception:
return 'stable-combined-version'
if len(key_name)<1: key_value_db=[] ### No key, so store data in a list
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); data = string.split(data,'\t')
ese = EnsemblSQLEntryData(); skip = 'no'; entries+=1
if len(line)>3: ### In the most recent version (plant-16) encountered lines with just a single /
for index in index_db:
header_name,value_type = index_db[index]
try: value = data[index]
except Exception:
if 'array.' in filename: skip = 'yes'; value = ''
"""
### This will often occur due to prior lines having a premature end of line (bad formatting in source data) creating a bad line (just skip it)
else:
### Likely occurs when Ensembl adds new line types to their SQL file (bastards)
print index_db,data, index, len(data),header_name,value_type
kill"""
try:
if value_type == 'integer': value = int(value) # Integers will take up less space in memory
except ValueError:
if value == '\\N': value = value
elif 'array.' in filename: skip = 'yes' ### There can be formatting issues with this file when read in python
else: skip = 'yes'; value = '' #print filename,[header_name], index,value_type,[value];kill
###Although we are setting each index to value, the distinct headers will instruct
###EnsemblSQLEntryData to assign the value to a distinct object
if header_name != key_name:
ese.setSQLValue(header_name,value)
else:
key = value
### Filtering primarily used for all Xref, since this database is very large
#if filename == 'xref.txt':print len(key_name), key_filter,external_filter, external_xref_key_filter;kill
if skip == 'yes': null = []
elif 'probe.' in filename:
### For all array types (Affy & Other) - used to select specific array types - also stores non-Affy probe-data
### Each array has a specific ID - determine if this ID is the one we set in external_filter_db
if ese.ArrayChipID() in external_filter_db:
vendor = external_filter_db[ese.ArrayChipID()][0]
if vendor == 'AFFY' and analysisType != 'ProbeLevel':
key_value_db[ese.ProbeSetID()] = [] ### key is the probe_id - used for Affy probe ID location analysis
else:
try: key_value_db[key].append(ese) ### probe_set.txt only appears to contain AFFY IDs, the remainder are saved in probe.txt under probe_id
except KeyError: key_value_db[key] = [ese]
elif 'probe_set.' in filename:
if analysisType == 'ProbeLevel':
if key in probe_set_db: key_value_db[key] = ese
else:
if key in probe_db: key_value_db[key] = [ese]
elif 'probe_feature.' in filename:
if key in probe_db: key_value_db[key] = ese
elif len(key_name)<1: key_value_db.append(ese)
elif key_filter == 'no' and external_filter == 'no': key_value_db[key] = ese
elif external_xref_key_filter == 'yes':
if key in external_xref_key_db:
try: key_value_db[key].append(ese)
except KeyError: key_value_db[key] = [ese]
elif 'object_xref' in filename:
try:
if key in probe_set_db:
if (manufacturer == 'AFFY' and ese.EnsemblObjectType() == 'ProbeSet') or (manufacturer != 'AFFY' and ese.EnsemblObjectType() == 'Probe'):
try: key_value_db[key].append(ese)
except KeyError: key_value_db[key] = [ese]
data_types[ese.EnsemblObjectType()]=[]
except Exception: print external_xref_key_filter, key_filter, filename;kill
elif key in key_filter_db: key_value_db[key] = ese
elif 'seq_region.' in filename: key_value_db[key] = ese ### Specifically applies to ProbeLevel analyses
elif external_filter == 'yes': ### For example, if UniGene's dbase ID is in the external_filter_db (when parsing xref)
#if 'xref' in filename: print filename,[header_name], index,value_type,[value],ese.ExternalDbId(),external_filter_db;kill
try:
#print key, [ese.ExternalDbId()], [ese.DbprimaryAcc()], [ese.DisplayLabel()];kill
#if key == 1214287: print [ese.ExternalDbId()], [ese.DbprimaryAcc()], [ese.DisplayLabel()]
if ese.ExternalDbId() in external_filter_db: key_value_db[key] = ese
all_external_ids[ese.ExternalDbId()]=[]
except AttributeError:
print len(external_filter_db),len(key_filter_db)
print 'index_db',index_db,'\n'
print 'key_name',key_name
print len(external_filter_db)
print len(key_value_db);kill
#key_filter_db={}; external_filter_db={}; external_xref_key_db={}
if 'object_xref' in filename:
print external_xref_key_filter, key_filter, filename
print "Extracted",len(key_value_db),"out of",entries,"entries for",filename
print 'Data types linked to probes:',
for data_type in data_types: print data_type,
print ''
return key_value_db
def importSQLDescriptions(import_group,filename,sql_file_db):
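    ### Scan the CREATE TABLE statements in the .sql description file to record the column index and type of each field requested in Config/EnsemblSQL.txt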
fn=filepath(filename)
index_db={}; index=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if 'CREATE TABLE' in data:
###New file descriptions
file_broken = string.split(data,'`'); filename = file_broken[1]+".txt"
if (import_group,filename) in sql_file_db:
sql_data = sql_file_db[import_group,filename]
target_field_names = sql_data.FieldNames()
else: target_field_names=[]
elif data[:3] == ' `':
field_broken = string.split(data,'`'); field_name = field_broken[1]
if 'int(' in data: type = 'integer'
else: type = 'string'
if field_name in target_field_names: index_db[index]=field_name,type
index+=1
elif len(data)<2:
###Write out previous data here and clear entries
if len(index_db)>0: ### Thus fields in the Config file are found in the description file
sql_data.setIndexDB(index_db)
index_db = {}; index=0 ### re-set
        elif '/*' in data or 'DROP TABLE IF EXISTS' in data:
            pass ### SQL comment or DROP TABLE line (added in newer Ensembl dumps) - nothing to index
else: index+=1
    if len(index_db)>0: sql_data.setIndexDB(index_db)
return sql_file_db
def storeFTPDirs(ftp_server,subdir,dirtype):
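    ### List the species directories on the Ensembl FTP server that match dirtype (e.g., '_core_') and build the per-species SQL URLs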
from ftplib import FTP
ftp = FTP(ftp_server); ftp.login()
try: ftp.cwd(subdir)
except Exception:
subdir = string.replace(subdir,'/mysql','') ### Older version don't have this subdir
ftp.cwd(subdir)
data = []; child_dirs={};species_list=[]
ftp.dir(data.append); ftp.quit()
for line in data:
line = string.split(line,' '); file_dir = line[-1]
if dirtype in file_dir:
species_name_data = string.split(file_dir,dirtype)
species_name = species_name_data[0]
if 'bacteria' not in species_name: ### Occurs for Bacteria Genomes
species_name = string.replace(string.upper(species_name[0])+species_name[1:],'_',' ')
ensembl_sql_dir = 'ftp://'+ftp_server+subdir+'/'+file_dir+'/'
ensembl_sql_description_dir = file_dir+'.sql'
child_dirs[species_name] = ensembl_sql_dir,ensembl_sql_description_dir
species_list.append(species_name)
species_list.sort()
return child_dirs,species_list
def getEnsemblVersions(ftp_server,subdir):
from ftplib import FTP
ftp = FTP(ftp_server); ftp.login()
ftp.cwd(subdir)
data = []; ensembl_versions=[]
ftp.dir(data.append); ftp.quit()
for line in data:
line = string.split(line,' '); file_dir = line[-1]
if 'release' in file_dir and '/' not in file_dir:
version_number = int(string.replace(file_dir,'release-',''))
if version_number>46: ###Before this version, the SQL FTP folder structure differed substantially
ensembl_versions.append(file_dir)
return ensembl_versions
def clearall():
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all: del globals()[var]
def clearvar(varname):
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
if var == varname: del globals()[var]
def getCurrentEnsemblSpecies(version):
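    ### Normalize a user-supplied version string (e.g., 'EnsMart65', 'Plant14') into an FTP release path, then list the available species and releases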
original_version = version
if 'EnsMart' in version:
version = string.replace(version,'EnsMart','') ### User may enter EnsMart65, but we want 65 (release- is added before the number)
version = string.replace(version,'Plus','')
if 'Plant' in version:
version = string.replace(version,'Plant','')
if 'Fungi' in version:
version = string.replace(version,'Fungi','')
if 'Bacteria' in version:
version = string.replace(version,'Bacteria','')
if 'release' not in version and 'current' not in version:
version = 'release-'+version
if 'Plant' in original_version or 'Bacteria' in original_version or 'Fungi' in original_version:
ftp_server = 'ftp.ensemblgenomes.org'
else:
ftp_server = 'ftp.ensembl.org'
if version == 'current':
subdir = '/pub/current_mysql'
elif 'Plant' in original_version:
subdir = '/pub/plants/'+version+'/mysql'
elif 'Bacteria' in original_version:
subdir = '/pub/'+version+'/bacteria/mysql'
elif 'Fungi' in original_version:
subdir = '/pub/'+version+'/fungi/mysql'
else:
subdir = '/pub/'+version+'/mysql'
dirtype = '_core_'
ensembl_versions = getEnsemblVersions(ftp_server,'/pub')
child_dirs, species_list = storeFTPDirs(ftp_server,subdir,dirtype)
return child_dirs, species_list, ensembl_versions
def getCurrentEnsemblSequences(version,dirtype,species):
ftp_server = 'ftp.ensembl.org'
if version == 'current': subdir = '/pub/current_'+dirtype
else: subdir = '/pub/'+version+'/'+dirtype
seq_dir = storeSeqFTPDirs(ftp_server,species,subdir,dirtype)
return seq_dir
def getCurrentEnsemblGenomesSequences(version,dirtype,species):
original_version = version
if 'Fungi' in version:
version = string.replace(version,'Fungi','')
if 'Plant' in version:
version = string.replace(version,'Plant','')
if 'Bacteria' in version:
version = string.replace(version,'Bacteria','')
ftp_server = 'ftp.ensemblgenomes.org'
if version == 'current': subdir = '/pub/current_'+dirtype
elif 'Bacteria' in original_version:
subdir = '/pub/'+version+'/bacteria/'+dirtype
elif 'Fungi' in original_version:
subdir = '/pub/'+version+'/fungi/'+dirtype
else: subdir = '/pub/plants/'+version+'/'+dirtype
seq_dir = storeSeqFTPDirs(ftp_server,species,subdir,dirtype)
return seq_dir
def storeSeqFTPDirs(ftp_server,species,subdir,dirtype):
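    ### Find the FASTA sequence files for a species: prefer a single .all file, else per-chromosome dna.chromosome files, else generic dna. files (excluding nonchromosomal)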
from ftplib import FTP
ftp = FTP(ftp_server); ftp.login()
#print subdir;sys.exit()
try: ftp.cwd(subdir)
except Exception:
subdir = string.replace(subdir,'/'+dirtype,'') ### Older version don't have this subdir
ftp.cwd(subdir)
data = []; seq_dir=[]; ftp.dir(data.append); ftp.quit()
for line in data:
line = string.split(line,' '); file_dir = line[-1]
if species[1:] in file_dir:
if '.fa' in file_dir and '.all' in file_dir: seq_dir = 'ftp://'+ftp_server+subdir+'/'+file_dir
elif '.fa' in file_dir and 'dna.chromosome' in file_dir:
try: seq_dir.append((file_dir,'ftp://'+ftp_server+subdir+'/'+file_dir))
except Exception: seq_dir=[]; seq_dir.append((file_dir,'ftp://'+ftp_server+subdir+'/'+file_dir))
elif '.fa' in file_dir and 'dna.' in file_dir and 'nonchromosomal' not in file_dir:
### This is needed when there are numbered chromosomes (e.g., ftp://ftp.ensembl.org/pub/release-60/fasta/anolis_carolinensis/dna/)
try: seq_dir2.append((file_dir,'ftp://'+ftp_server+subdir+'/'+file_dir))
except Exception: seq_dir2=[]; seq_dir2.append((file_dir,'ftp://'+ftp_server+subdir+'/'+file_dir))
if len(seq_dir)==0: seq_dir = seq_dir2
return seq_dir
"""
def getExternalDBs(Species,ensembl_sql_dir,ensembl_sql_description_dir):
global species; species = Species;
configType = 'Basic'
sql_file_db,sql_group_db = importEnsemblSQLInfo(configType) ###Import the Config file with the files and fields to parse from the donwloaded SQL files
sql_group_db['Description'] = [ensembl_sql_description_dir]
info = string.split(ensembl_sql_description_dir,'_'); ensembl_build = info[-2]
sq = EnsemblSQLInfo(ensembl_sql_description_dir, 'EnsemblSQLDescriptions', 'Description', '', '', '')
sql_file_db['Primary',ensembl_sql_description_dir] = sq
output_dir = 'BuildDBs/EnsemblSQL/'+species+'/'
importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Primary',force) ###Download and import the Ensembl SQL files
"""
class SystemData:
def __init__(self, syscode, sysname, mod):
self._syscode = syscode; self._sysname = sysname; self._mod = mod
def SystemCode(self): return self._syscode
def SystemName(self): return self._sysname
def MOD(self): return self._mod
def __repr__(self): return self.SystemCode()+'|'+self.SystemName()+'|'+self.MOD()
def importSystemInfo():
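    ### Read Config/source_data.txt (tab-delimited: system name, system code, optional MOD) into SystemData objects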
filename = 'Config/source_data.txt'; x=0
system_list=[]; system_codes={}
fn=filepath(filename); mod_list=[]
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if '!DOCTYPE' in data:
fn2 = string.replace(fn,'.txt','_archive.txt')
import shutil; shutil.copyfile(fn2,fn) ### Bad file was downloaded (with warning)
            return importSystemInfo() ### Re-import after restoring the archived copy of the file
else:
try: sysname=t[0];syscode=t[1]
except Exception: sysname=''
try: mod = t[2]
except Exception: mod = ''
if x==0: x=1
elif sysname != '':
system_list.append(sysname)
ad = SystemData(syscode,sysname,mod)
if len(mod)>1: mod_list.append(sysname)
system_codes[sysname] = ad
return system_codes,system_list,mod_list
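### Illustrative call of the build driver below (hypothetical arguments - the real caller supplies actual paths and settings):
#buildGOEliteDBs('Hs',ensembl_sql_dir,ensembl_sql_description_dir,'EntrezGene','Basic','GeneAndExternal','over-write previous','no',external_system_db,'no')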
def buildGOEliteDBs(Species,ensembl_sql_dir,ensembl_sql_description_dir,ExternalDBName,configType,analysis_type,Overwrite_previous,Rewrite_existing,external_system_db,force):
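    ### Top-level driver: download the core (and optionally funcgen) SQL tables for one species, then export Ensembl-to-external-system relationship tables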
global external_xref_key_db; global species; species = Species; global overwrite_previous; overwrite_previous = Overwrite_previous
global rewrite_existing; rewrite_existing = Rewrite_existing; global ensembl_build; global externalDBName; externalDBName = ExternalDBName
global ensembl_build; ensembl_build = string.split(ensembl_sql_dir,'core')[-1][:-1]; global analysisType; analysisType = analysis_type
global manufacturer; global system_synonym_db; global added_systems; added_systems={}; global all_external_ids; all_external_ids={}
### Get System Code info and create DBs for automatically formatting system output names
    ### This is necessary to ensure similar systems with different names are saved and referenced
### by the same system name and system code
import UI;
try: system_codes,source_types,mod_types = UI.remoteSystemInfo()
except Exception: system_codes,source_types,mod_types = importSystemInfo()
system_synonym_db = {}; system_code_db={}; new_system_codes={}
for system_name in system_codes: system_code_db[system_codes[system_name].SystemCode()] = system_name
if externalDBName != 'GO':
system_code = external_system_db[externalDBName]
try: system_name = system_code_db[system_code]
except Exception:
system_name = externalDBName
ad = UI.SystemData(system_code,system_name,'')
new_system_codes[externalDBName] = ad ### Add these to the source_data file if relationships added (see below)
system_code_db[system_code] = system_name ### Make sure that only one new system name for the code is added
system_synonym_db[externalDBName] = system_name
### Get Ensembl Data
    sql_file_db,sql_group_db = importEnsemblSQLInfo(configType) ###Import the Config file with the files and fields to parse from the downloaded SQL files
sql_group_db['Description'] = [ensembl_sql_description_dir]
info = string.split(ensembl_sql_description_dir,'_'); ensembl_build = info[-2]
sq = EnsemblSQLInfo(ensembl_sql_description_dir, 'EnsemblSQLDescriptions', 'Description', '', '', '')
sql_file_db['Primary',ensembl_sql_description_dir] = sq
output_dir = 'BuildDBs/EnsemblSQL/'+species+'/'
importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Primary',force) ###Download and import the Ensembl SQL files
export_status='yes';output_id_type='Gene'
if analysisType == 'GeneAndExternal':
if 'over-write previous' in overwrite_previous: output_ens_dir = 'Databases/'+species+'/gene/Ensembl.txt'
else: output_ens_dir = 'NewDatabases/'+species+'/gene/Ensembl.txt'
file_found = verifyFile(output_ens_dir)
revert_force = 'no'
if file_found == 'no':
if force == 'yes': force = 'no'; revert_force = 'yes'
xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Xref',force)
buildEnsemblGeneGOEliteTable(species,xref_db,overwrite_previous)
###Export data for Ensembl-External gene system
buildFilterDBForExternalDB(externalDBName)
xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Xref',force)
external_xref_key_db = xref_db
if revert_force == 'yes': force = 'yes'
object_xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Object-Xref',force)
num_relationships_exported = buildEnsemblExternalDBRelationshipTable(externalDBName,xref_db,
object_xref_db,output_id_type,species,writeToGOEliteFolder=True)
if 'EntrezGene' in externalDBName: ### Make a meta DB table for translating WikiPathway primary IDs
buildEnsemblExternalDBRelationshipTable('meta',xref_db,object_xref_db,
output_id_type,species,writeToGOEliteFolder=True)
### Add New Systems to the source_data relationships file (only when valid relationships found)
for externalDBName in new_system_codes:
if externalDBName in added_systems:
ad = new_system_codes[externalDBName]
system_codes[ad.SystemName()] = ad
UI.exportSystemInfoRemote(system_codes)
if analysisType == 'FuncGen' or analysisType == 'ProbeLevel':
sl = string.split(externalDBName,'_'); manufacturer=sl[0];externalDBName=string.replace(externalDBName,manufacturer+'_','')
ensembl_sql_dir = string.replace(ensembl_sql_dir,'_core_', '_funcgen_')
ensembl_sql_description_dir = string.replace(ensembl_sql_description_dir,'_core_', '_funcgen_')
        sql_file_db,sql_group_db = importEnsemblSQLInfo('FuncGen') ###Import the Config file with the files and fields to parse from the downloaded SQL files
sql_group_db['Description'] = [ensembl_sql_description_dir]
info = string.split(ensembl_sql_description_dir,'_'); ensembl_build = info[-2]
sq = EnsemblSQLInfo(ensembl_sql_description_dir, 'EnsemblSQLFuncDescriptions', 'Description', '', '', '')
sql_file_db['PrimaryFunc',ensembl_sql_description_dir] = sq
output_dir = 'BuildDBs/EnsemblSQL/'+species+'/FuncGen/'
xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'PrimaryFunc',force) ###Download and import the Ensembl SQL files
if analysisType == 'FuncGen':
#print len(probe_set_db), len(transcript_stable_id_db), len(xref_db)
object_xref_db = importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'Object-XrefFunc',force)
buildEnsemblArrayDBRelationshipTable(xref_db,object_xref_db,output_id_type,externalDBName,species)
if analysisType == 'ProbeLevel':
### There is a lot of unnecessary data imported for this specific analysis, but this strategy is likely the most straightforward code addition
sql_file_db['ProbeFeature',ensembl_sql_description_dir] = sq
importEnsemblSQLFiles(ensembl_sql_dir,ensembl_sql_dir,sql_group_db,sql_file_db,output_dir,'ProbeFeature',force)
outputProbeGenomicLocations(externalDBName,species)
return all_external_ids
def buildEnsemblArrayDBRelationshipTable(xref_db,object_xref_db,output_id_type,externalDBName,species):
### Get xref annotations (e.g., external geneID) for a set of Ensembl IDs (e.g. gene IDs)
external_system = manufacturer
external_system = external_system[0]+string.lower(external_system[1:])
print 'Exporting',externalDBName
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze':
parent_dir = 'AltDatabase/goelite'
elif 'over-write previous' in overwrite_previous: parent_dir = 'Databases'
else: parent_dir = 'NewDatabases'
if 'Affy' in external_system:
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-Affymetrix.txt'
elif 'Agilent' in external_system:
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-Agilent.txt'
elif 'Illumina' in external_system:
output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-Illumina.txt'
else: output_dir = parent_dir+'/'+species+'/uid-gene/Ensembl-MiscArray.txt'
version_info = species+' Ensembl relationships downloaded from EnsemblSQL server, build '+ensembl_build
exportVersionInfo(output_dir,version_info)
headers = ['Ensembl ID',external_system + ' ID']; index=0
id_type_db={}; gene_relationship_db={}; transcript_relationship_db={}; translation_relationship_db={}
### Get Ensembl gene-transcript relationships from core files
ens_transcript_db={}
for transcript_id in transcript_db:
ti = transcript_db[transcript_id]
try: ens_transcript = transcript_db[transcript_id].StableId()
except Exception: ens_transcript = transcript_stable_id_db[transcript_id].StableId()
ens_transcript_db[ens_transcript] = ti
values_list=[]; nulls=0; type_errors=[]
### Get array ID-gene relationships from exref and probe dbs
if len(probe_set_db)>0:
for probe_set_id in object_xref_db:
try:
for ox in object_xref_db[probe_set_id]:
try:
transcript_id = ox.XrefId()
ens_transcript = xref_db[transcript_id].DbprimaryAcc()
ti = ens_transcript_db[ens_transcript]; geneid = ti.GeneId()
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
for ps in probe_set_db[probe_set_id]:
probeset = ps.Name() ### The same probe ID shouldn't exist more than once, but for some arrays it does (associated with multiple probe names)
values_list.append([ens_gene,probeset])
except Exception: nulls+=1
except TypeError,e:
null=[]
print probe_set_id, len(probe_set_db), len(probe_set_db[probe_set_id])
for ps in probe_set_db[probe_set_id]:
probeset = ps.Name()
print probeset
print 'ERROR ENCOUNTERED.',e; sys.exit()
values_list = unique.unique(values_list)
print 'ID types linked to '+external_system+' are: Gene:',len(values_list),'... not linked:',nulls
if len(values_list)>0:
exportEnsemblTable(values_list,headers,output_dir)
else: print "****** Unknown problem encountered with the parsing of:", externalDBName
def outputProbeGenomicLocations(externalDBName,species):
external_system = manufacturer
external_system = external_system[0]+string.lower(external_system[1:])
print 'Exporting',externalDBName
output_dir = 'Affymetrix/'+species+'/'+externalDBName+'.txt'
if 'Affy' in external_system:
headers = ['ProbeXY','ProbesetName','Chr','Start','End','Strand']; values_list=[]
print len(probe_db), ':Probes in Ensembl', len(probe_feature_db),':Probes with genomic locations'
for probe_id in probe_db:
for pi in probe_db[probe_id]: ### probe ID
probe_set_name = pi.ProbeSetID() ### probe_set_id is only a superfluous annotation - not used downstream
probe_name = pi.Name()
try:
pl = probe_feature_db[probe_id] ### probe genomic location data
chr = seq_region_db[pl.SeqRegionId()].Name()
seq_region_start = pl.SeqRegionStart(); seq_region_end = pl.SeqRegionEnd(); strand = pl.SeqRegionStrand()
values_list.append([probe_name,probe_set_name,chr,seq_region_start,seq_region_end,strand])
except Exception: null=[]
values_list = unique.unique(values_list)
print 'ID types linked to '+external_system+' are: Probe:',len(values_list)
if len(values_list)>0:
exportEnsemblTable(values_list,headers,output_dir)
def buildEnsemblGeneGOEliteTable(species,xref_db,overwrite_previous):
### Get Definitions and Symbol for each Ensembl gene ID
values_list = []
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze':
output_dir = 'AltDatabase/goelite/'+species+'/gene/Ensembl.txt'
elif 'over-write previous' in overwrite_previous:
output_dir = 'Databases/'+species+'/gene/Ensembl.txt'
else:
output_dir = 'NewDatabases/'+species+'/gene/Ensembl.txt'
headers = ['ID','Symbol','Description']
for geneid in gene_db:
gi = gene_db[geneid]; display_xref_id = gi.DisplayXrefId(); description = gi.Description()
if description == '\\N': description = ''
try: symbol = xref_db[display_xref_id].DisplayLabel()
except KeyError: symbol = ''
try:
try: ens_gene = gene_db[geneid].StableId()
except Exception: ens_gene = gene_stable_id_db[geneid].StableId()
values = [ens_gene,symbol,description]
values_list.append(values)
except KeyError: null=[] ### In version 47, discovered that some geneids are not in the gene_stable_id table - unclear why this is
###Also, create a filter_db that allows for the creation of Ensembl-external gene db tables
exportEnsemblTable(values_list,headers,output_dir)
def verifyFile(filename):
fn=filepath(filename); file_found = 'yes'
try:
for line in open(fn,'rU').xreadlines():break
except Exception: file_found = 'no'
return file_found
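### verifyFile returns the strings 'yes'/'no' rather than booleans, matching the flag
### conventions used throughout this module. A boolean sketch for comparison (note that
### os.path.isfile does not probe readability the way the xreadlines loop above does):
def _fileExists(filename):
    import os
    return os.path.isfile(filepath(filename))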
if __name__ == '__main__':
import update
dp = update.download_protocol('ftp://ftp.ensembl.org/pub/release-72/mysql/macaca_mulatta_core_72_10/gene.txt.gz','AltDatabase/ensembl/Ma/EnsemblSQL/','')
reload(update)
dp = update.download_protocol('ftp://ftp.ensembl.org/pub/release-72/mysql/macaca_mulatta_core_72_10/gene.txt.gz','AltDatabase/ensembl/Ma/EnsemblSQL/','');sys.exit()
getGeneTranscriptOnly('Gg','Basic','EnsMart65','yes');sys.exit()
#getChrGeneOnly('Hs','Basic','EnsMart65','yes');sys.exit()
analysisType = 'GeneAndExternal'; externalDBName_list = ['Ens_Gg_transcript']
force = 'yes'; configType = 'Basic'; overwrite_previous = 'no'; iteration=0; version = 'current'
print 'proceeding'
analysisType = 'ExternalOnly'
ensembl_version = '65'
species = 'Gg'
#ensembl_version = 'Fungi27'
#species = 'Nc'
#print string.replace(unique.getCurrentGeneDatabaseVersion(),'EnsMart','');sys.exit()
#getEnsemblTranscriptSequences(ensembl_version,species,restrictTo='cDNA');sys.exit()
#getFullGeneSequences('Bacteria18','bacteria_1_collection'); sys.exit()
#for i in child_dirs: print child_dirs[i]
#"""
### WON'T WORK FOR MORE THAN ONE EXTERNAL DATABASE -- WHEN RUN WITHIN THIS MOD
species_full = 'Neurospora crassa'
species_full = 'Gallus gallus'
species_full = 'Mus musculus'; ensembl_version = '72'; force = 'no'; species = 'Mm'; analysisType = 'AltAnalyzeDBs'; configType = 'Advanced'
#child_dirs, ensembl_species, ensembl_versions = getCurrentEnsemblSpecies(ensembl_version)
#genus,species = string.split(species_full,' '); species = genus[0]+species[0]
#ensembl_sql_dir,ensembl_sql_description_dir = child_dirs[species_full]
rewrite_existing = 'no'
external_system_db = {'Ens_Gg_transcript':'Et'}
for externalDBName in externalDBName_list:
if force == 'yes' and iteration == 1: force = 'no'
#buildGOEliteDBs(species,ensembl_sql_dir,ensembl_sql_description_dir,externalDBName,configType,analysisType,overwrite_previous,rewrite_existing,external_system_db,force); iteration+=1
buildEnsemblRelationalTablesFromSQL(species,configType,analysisType,externalDBName,ensembl_version,force)
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/EnsemblSQL.py
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from build_scripts import GO_parsing
import copy
import time
try: from build_scripts import alignToKnownAlt
except Exception: pass ### circular import error
import export
import traceback
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
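### A minimal usage sketch of makeUnique (not called anywhere; shows that unhashable
### list members are tupled for deduplication and restored to lists on output, and that
### results come back sorted):
def _makeUniqueExample():
    assert makeUnique([[1,2],[1,2],[3,4]]) == [[1,2],[3,4]]
    assert makeUnique(['b','a','b']) == ['a','b']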
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def findAligningIntronBlock(ed,intron_block_db,exon_block_db,last_region_db):
aligned_region = 'no'
for block in intron_block_db:
for rd in intron_block_db[block]:
intron_pos = [rd.ExonStart(), rd.ExonStop()]; intron_pos.sort()
intronid = rd.IntronRegionID() ### The intron block (relative to the exon block) is assigned in the function exon_clustering
retained_pos = [ed.ExonStart(), ed.ExonStop()]; retained_pos.sort()
aligned_region = 'no'
#print ed.GeneID(), rd.GeneID(), retained_pos,intron_pos,intronid
if intron_pos == retained_pos: aligned_region = 'yes'
elif intron_pos[0] == retained_pos[0] or intron_pos[1] == retained_pos[1]: aligned_region = 'yes'
elif (intron_pos[0]<retained_pos[0] and intron_pos[1]>retained_pos[0]) or (intron_pos[0]<retained_pos[1] and intron_pos[1]>retained_pos[1]): aligned_region = 'yes'
elif (retained_pos[0]<intron_pos[0] and retained_pos[1]>intron_pos[0]) or (retained_pos[0]<intron_pos[1] and retained_pos[1]>intron_pos[1]): aligned_region = 'yes'
if aligned_region == 'yes':
#print 'intron-aligned',intronid
ed.setAssociatedSplicingEvent('intron-retention')
rd.updateDistalIntronRegion()
intronid = intronid[:-1]+str(rd.DistalIntronRegion())
intronid = string.replace(intronid,'-','.')
ed.setNewIntronRegion(intronid); break
if aligned_region == 'yes': break
if aligned_region == 'no':
for block in exon_block_db:
for rd in exon_block_db[block]:
exonregion_pos = [rd.ExonStart(), rd.ExonStop()]; exonregion_pos.sort()
exonid = rd.ExonRegionID() ### The exon block (relative to the exon block) is assigned in the function exon_clustering
retained_pos = [ed.ExonStart(), ed.ExonStop()]; retained_pos.sort()
aligned_region = 'no'
#print ed.GeneID(),rd.GeneID(), retained_pos,exonregion_pos,exonid
if exonregion_pos == retained_pos: aligned_region = 'yes'
elif exonregion_pos[0] == retained_pos[0] or exonregion_pos[1] == retained_pos[1]: aligned_region = 'yes'
elif (exonregion_pos[0]<retained_pos[0] and exonregion_pos[1]>retained_pos[0]) or (exonregion_pos[0]<retained_pos[1] and exonregion_pos[1]>retained_pos[1]): aligned_region = 'yes'
elif (retained_pos[0]<exonregion_pos[0] and retained_pos[1]>exonregion_pos[0]) or (retained_pos[0]<exonregion_pos[1] and retained_pos[1]>exonregion_pos[1]): aligned_region = 'yes'
if aligned_region == 'yes':
#print 'exon aligned',exonid
#print len(last_region_db),last_region_db[rd.ExonNumber()]
last_region_db[rd.ExonNumber()].sort(); last_region = last_region_db[rd.ExonNumber()][-1]
last_region_db[rd.ExonNumber()].append(last_region+1)
#print len(last_region_db),last_region_db[rd.ExonNumber()],'E'+str(rd.ExonNumber())+'.'+str(last_region+1)
ed.setAssociatedSplicingEvent('exon-region-exclusion')
exonid = 'E'+str(rd.ExonNumber())+'.'+str(last_region+1)
ed.setNewIntronRegion(exonid); break
if aligned_region == 'yes': break
return ed,last_region_db
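### The cascade of positional tests above reduces to a single predicate: two regions
### align when they share a start, share an end, or an endpoint of one falls strictly
### inside the other. A condensed sketch (returns a boolean rather than this module's
### 'yes'/'no' strings; findAligningIntronBlock keeps the explicit case-by-case form):
def _regionsAlign(pos1,pos2):
    a1,a2 = sorted(pos1); b1,b2 = sorted(pos2)
    if a1 == b1 or a2 == b2: return True ### shared boundary
    return a1 < b1 < a2 or a1 < b2 < a2 or b1 < a1 < b2 or b1 < a2 < b2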
def getUCSCSplicingAnnotations(ucsc_events,splice_events,start,stop):
splice_events = string.split(splice_events,'|')
for (r_start,r_stop,splice_event) in ucsc_events:
if ((start >= r_start) and (start < r_stop)) or ((stop > r_start) and (stop <= r_stop)):
splice_events.append(splice_event)
elif ((r_start >= start) and (r_start <= stop)) or ((r_stop >= start) and (r_stop <= stop)): ### Applicable to polyA annotations
splice_events.append(splice_event)
splice_events = unique.unique(splice_events)
splice_events = string.join(splice_events,'|')
try:
if splice_events[0] == '|': splice_events=splice_events[1:]
except IndexError: null=[]
return splice_events
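### Usage sketch (hypothetical coordinates): any UCSC event whose coordinates overlap
### the queried [start,stop] interval is merged into the pipe-delimited event string:
### getUCSCSplicingAnnotations([(100,200,'cassette-exon')],'intron-retention',150,300)
### returns the two events joined by '|' (deduplicated by unique.unique).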
def exportSubGeneViewerData(exon_regions,exon_annotation_db2,critical_gene_junction_db,intron_region_db,intron_retention_db,full_junction_db,excluded_intronic_junctions,ucsc_splicing_annot_db):
intron_retention_db2={}
for (gene,chr,strand) in intron_retention_db:
for intron_info in intron_retention_db[(gene,chr,strand)]:
pos1,pos2,ed = intron_info; pos_list=[pos1,pos2]; pos_list.sort()
try: intron_retention_db2[gene].append(pos_list)
except KeyError: intron_retention_db2[gene] = [pos_list]
exon_annotation_export = 'AltDatabase/ensembl/'+species+'/'+species+'_SubGeneViewer_exon-structure-data.txt'
print 'Writing the file:',exon_annotation_export
fn=filepath(exon_annotation_export); sgvdata = open(fn,'w')
title = ['gene','exon-id','type','block','region','constitutive','start-exon','annotation']
title = string.join(title,'\t')+'\n'; sgvdata.write(title)
full_exon_structure_export = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
full_junction_structure_export = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
exondata = export.ExportFile(full_exon_structure_export); junctiondata = export.ExportFile(full_junction_structure_export)
exontitle = ['gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions']
exontitle = string.join(exontitle,'\t')+'\n'; exondata.write(exontitle); junctiondata.write(exontitle)
export_annotation = '_intronic'
alt_junction_export = 'AltDatabase/ensembl/'+species+'/'+species+'_alternative_junctions'+export_annotation+'.txt'
print 'Writing the file:',alt_junction_export
fn=filepath(alt_junction_export); reciprocol_junction_data = open(fn,'w')
title = ['gene','critical-exon-id','junction1','junction2']
title = string.join(title,'\t')+'\n'; reciprocol_junction_data.write(title)
### exon_annotation_db2 contains coordinates and ensembl exon IDs for each exon - extract out just this data
exon_coordinate_db={}
for key in exon_annotation_db2:
gene=key[0]; exon_coords={}
for exon_data in exon_annotation_db2[key]:
exon_start = exon_data[1][0]; exon_stop = exon_data[1][1]; ed = exon_data[1][2]
exon_coords[exon_start,exon_stop] = ed.ExonID()
exon_coordinate_db[gene] = exon_coords
intron_coordinate_db={}; exon_region_annotations={}; intron_junction_db={}
for key in intron_retention_db:
gene=key[0]; exon_coords={}
for exon_data in intron_retention_db[key]:
exon_start = exon_data[0]; exon_stop = exon_data[1]; ed = exon_data[2]
exon_coords[exon_start,exon_stop] = ed.ExonID()
intron_coordinate_db[gene] = exon_coords
for gene in exon_regions:
previous_exonid=''; previous_intronid=''
block_db = exon_regions[gene]
try:intron_block_db = intron_region_db[gene]; introns = 'yes'
except KeyError: introns = 'no'
if gene in ucsc_splicing_annot_db: ucsc_events = ucsc_splicing_annot_db[gene]
else: ucsc_events = []
utr_data = [gene,'U0.1','u','0','1','n','n','']
values = string.join(utr_data,'\t')+'\n'; sgvdata.write(values)
index=1
for block in block_db:
for rd in block_db[block]:
splice_event = rd.AssociatedSplicingEvent();exon_pos = [rd.ExonStart(), rd.ExonStop()]; exon_pos.sort()
if gene in intron_retention_db2:
for retained_pos in intron_retention_db2[gene]:
if exon_pos == retained_pos: splice_event = 'exon-region-exclusion'
elif exon_pos[0] == retained_pos[0] or exon_pos[1] == retained_pos[1]: splice_event = 'exon-region-exclusion'
elif exon_pos[0]>retained_pos[0] and exon_pos[0]<retained_pos[1] and exon_pos[1]>retained_pos[0] and exon_pos[1]<retained_pos[1]: splice_event = 'exon-region-exclusion'
if 'exon-region-exclusion' in splice_event: rd.setAssociatedSplicingEvent(splice_event); id = rd
if len(splice_event)>0: constitutive_call = 'no' ### If a splice-event is associated with a recommended constitutive region, over-ride it
else: constitutive_call = rd.ConstitutiveCallAbrev()
values = [gene,rd.ExonRegionID2(),'e',str(index),str(rd.RegionNumber()),constitutive_call[0],'n',splice_event]#,str(exon_pos[0]),str(exon_pos[1])]
values = string.join(values,'\t')+'\n'; sgvdata.write(values)
ens_exons = getMatchingEnsExons(rd.ExonStart(),rd.ExonStop(),exon_coordinate_db[gene])
start_stop = [rd.ExonStart(),rd.ExonStop()]; start_stop.sort(); start,stop = start_stop
splice_event = getUCSCSplicingAnnotations(ucsc_events,splice_event,start,stop)
if len(splice_event)>0: constitutive_call = 'no' ### If a splice-event is associated with a recommended constitutive region, over-ride it
else: constitutive_call = rd.ConstitutiveCallAbrev()
values = [gene,rd.ExonRegionID2(),'chr'+rd.Chr(),rd.Strand(),str(start),str(stop),constitutive_call,ens_exons,splice_event,rd.AssociatedSplicingJunctions()]
values = string.join(values,'\t')+'\n'; exondata.write(values)
exon_region_annotations[gene,rd.ExonRegionID2()]=ens_exons
if len(previous_intronid)>0:
### Intron junctions are not stored and are thus not analyzed as reciprocal junctions (thus add them here for RNASeq)
try: intron_junction_db[gene].append((previous_exonid,previous_intronid))
except Exception: intron_junction_db[gene] = [(previous_exonid,previous_intronid)]
intron_junction_db[gene].append((previous_intronid,rd.ExonRegionID2()))
values = [gene,previous_intronid,previous_exonid+'-'+previous_intronid,previous_exonid+'-'+rd.ExonRegionID2(),id.AssociatedSplicingEvent()]
values = string.join(values,'\t')+'\n'; reciprocol_junction_data.write(values)
values = [gene,previous_intronid,previous_intronid+'-'+rd.ExonRegionID2(),previous_exonid+'-'+rd.ExonRegionID2(),id.AssociatedSplicingEvent()]
values = string.join(values,'\t')+'\n'; reciprocol_junction_data.write(values)
id.setAssociatedSplicingJunctions(previous_exonid+'-'+previous_intronid+'|'+previous_intronid+'-'+rd.ExonRegionID2())
previous_intronid=''
if splice_event == 'exon-region-exclusion':
previous_intronid = rd.ExonRegionID2()
else: previous_exonid=rd.ExonRegionID2()
index+=1
if introns == 'yes':
try:
intronid = rd.IntronRegionID() ### The intron block (relative to the exon block) is assigned in the function exon_clustering
for rd in intron_block_db[block]:
intron_pos = [rd.ExonStart(), rd.ExonStop()]; intron_pos.sort()
splice_event = rd.AssociatedSplicingEvent()
if gene in intron_retention_db2:
for retained_pos in intron_retention_db2[gene]:
#if '15' in intronid: print intron_pos,retained_pos;kill
if intron_pos == retained_pos: splice_event = 'intron-retention'
elif intron_pos[0] == retained_pos[0] or intron_pos[1] == retained_pos[1]: splice_event = 'intron-retention'
elif intron_pos[0]>retained_pos[0] and intron_pos[0]<retained_pos[1] and intron_pos[1]>retained_pos[0] and intron_pos[1]<retained_pos[1]: splice_event = 'intron-retention'
if 'intron' in splice_event: rd.setAssociatedSplicingEvent(splice_event); id = rd
if len(splice_event)>0: constitutive_call = 'no' ### If a splice-event is associated with a recommended constitutive region, over-ride it
else: constitutive_call = rd.ConstitutiveCallAbrev()
values = [gene,intronid,'i',str(index),str(rd.RegionNumber()),constitutive_call[0],'n',splice_event]#,str(intron_pos[0]),str(intron_pos[1])]
values = string.join(values,'\t')+'\n'; sgvdata.write(values); ens_exons=''
if len(splice_event)>0:
ens_exons = getMatchingEnsExons(rd.ExonStart(),rd.ExonStop(),intron_coordinate_db[gene])
exon_region_annotations[gene,intronid]=ens_exons
previous_intronid=intronid
start_stop = [rd.ExonStart(),rd.ExonStop()]; start_stop.sort(); start,stop = start_stop
splice_event = getUCSCSplicingAnnotations(ucsc_events,splice_event,start,stop)
if len(splice_event)>0: constitutive_call = 'no' ### If a splice-event is associated with a recommended constitutive region, over-ride it
else: constitutive_call = rd.ConstitutiveCallAbrev()
values = [gene,intronid,'chr'+rd.Chr(),rd.Strand(),str(start),str(stop),constitutive_call,ens_exons,splice_event,rd.AssociatedSplicingJunctions()]
values = string.join(values,'\t')+'\n'; exondata.write(values)
index+=1
except KeyError: null=[]
last_exon_region,null = string.split(rd.ExonRegionID2(),'.') ### e.g. E13.1 becomes E13, 1
utr_data = [gene,'U'+last_exon_region[1:]+'.1','u',str(index),'1','n','n','']
values = string.join(utr_data,'\t')+'\n'; sgvdata.write(values)
sgvdata.close(); reciprocol_junction_data.close()
for gene in excluded_intronic_junctions:
excluded_junction_exons=[]
last_region_db={}
exon_block_db=exon_regions[gene]
for block in exon_block_db:
for rd in exon_block_db[block]:
try: last_region_db[rd.ExonNumber()].append(rd.RegionNumber())
except KeyError: last_region_db[rd.ExonNumber()] = [rd.RegionNumber()]
### excluded_intronic_junctions: these are Ensembl exons reclassified as belonging to a retained intron
### Nonetheless, we need to store the original junctions since they are not novel
for (ed1,ed2) in excluded_intronic_junctions[gene]:
#print ed1.ExonStop(),ed2.ExonStart()
### Store all exons aligning to retained introns and sort by position (needed for ordering)
if ed1.IntronDeletionStatus()== 'yes':
if ed1.Strand() == '-': start = ed1.ExonStop(); stop = ed1.ExonStart()
else: start = ed1.ExonStart(); stop = ed1.ExonStop()
excluded_junction_exons.append((start,stop,ed1))
if ed2.IntronDeletionStatus()== 'yes':
if ed2.Strand() == '-': start = ed2.ExonStop(); stop = ed2.ExonStart()
else: start = ed2.ExonStart(); stop = ed2.ExonStop()
excluded_junction_exons.append((start,stop,ed2))
excluded_junction_exons = unique.unique(excluded_junction_exons); excluded_junction_exons.sort()
for (start,stop,ed) in excluded_junction_exons:
### update the IntronIDs and annotations for each exon (could be present in multiple junctions)
#print gene, ed.GeneID(), len(intron_region_db[gene]), len(exon_regions[gene]), len(last_region_db)
try: intron_regions = intron_region_db[gene]
except KeyError: intron_regions = [] ### In very rare cases where we have merged exons with <500bp introns, no introns for the gene will 'exist'
ed,last_region_db=findAligningIntronBlock(ed,intron_regions,exon_regions[gene],last_region_db)
#print [[ed.ExonID(), ed.NewIntronRegion(), start, stop]]
ens_exons = getMatchingEnsExons(ed.ExonStart(),ed.ExonStop(),exon_coordinate_db[gene])
start_stop = [ed.ExonStart(),ed.ExonStop()]; start_stop.sort(); start,stop = start_stop
splice_event = getUCSCSplicingAnnotations(ucsc_events,ed.AssociatedSplicingEvent(),start,stop)
try: values = [gene,ed.NewIntronRegion(),'chr'+ed.Chr(),ed.Strand(),str(start),str(stop),'no',ens_exons,splice_event,ed.AssociatedSplicingJunctions()]
except Exception:
print gene, 'chr'+ed.Chr(),ed.Strand(),str(start),str(stop),'no',ens_exons,splice_event,ed.AssociatedSplicingJunctions()
print len(exon_regions[gene]), len(intron_regions), len(exon_regions[gene]), len(last_region_db); kill
values = string.join(values,'\t')+'\n'; exondata.write(values)
###Export the two individual exon regions for each exon junction
critical_gene_junction_db = eliminate_redundant_dict_values(critical_gene_junction_db)
exon_annotation_export = 'AltDatabase/ensembl/' +species+'/'+species+ '_SubGeneViewer_junction-data.txt'
fn=filepath(exon_annotation_export); sgvjdata = open(fn,'w')
title = ['gene',"5'exon-region","3'exon-region"]
title = string.join(title,'\t')+'\n'; sgvjdata.write(title)
alt_junction={}
for gene in critical_gene_junction_db:
for junction_ls in critical_gene_junction_db[gene]:
values = [gene,junction_ls[0],junction_ls[1]]
values = string.join(values,'\t')+'\n'; sgvjdata.write(values)
try: alt_junction[gene].append((junction_ls[0],junction_ls[1]))
except KeyError: alt_junction[gene]=[(junction_ls[0],junction_ls[1])]
### Include junctions for intron retention and exon-exclusion
if gene in intron_junction_db:
for junction_ls in intron_junction_db[gene]:
values = [gene,junction_ls[0],junction_ls[1]]
values = string.join(values,'\t')+'\n'; sgvjdata.write(values)
try: full_junction_db[gene].append((junction_ls[0],junction_ls[1]))
except KeyError: full_junction_db[gene] = [(junction_ls[0],junction_ls[1])]
try: alt_junction[gene].append((junction_ls[0],junction_ls[1]))
except KeyError: alt_junction[gene]=[(junction_ls[0],junction_ls[1])]
for gene in full_junction_db:
### Add junctions to the exon database
block_db = exon_regions[gene]
try: intron_block_db = intron_region_db[gene]
except KeyError: intron_block_db={}
for (exon1,exon2) in full_junction_db[gene]:
found1='no'; found2='no'
for block in block_db:
if found1 == 'no' or found2 == 'no':
for rd in block_db[block]:
if rd.ExonRegionID2() == exon1: le = rd; found1 = 'yes' ### Left exon found
if rd.ExonRegionID2() == exon2: re = rd; found2 = 'yes' ### Right exon found
if found1 == 'no' or found2 == 'no':
for block in intron_block_db:
for rd in intron_block_db[block]:
if rd.IntronRegionID() == exon1: le = rd; found1 = 'yes' ### Left exon found
if rd.IntronRegionID() == exon2: re = rd; found2 = 'yes' ### Right exon found
if found1 == 'yes' and found2 == 'yes':
ens_exons1 = exon_region_annotations[gene,exon1]
ens_exons2 = exon_region_annotations[gene,exon2]
const_call = 'no'
if gene in alt_junction:
if (exon1,exon2) in alt_junction[gene]: const_call = 'no'
elif le.ConstitutiveCall() == 'yes' and re.ConstitutiveCall() == 'yes': const_call = 'yes'
ens_exons=combineAnnotations([ens_exons1,ens_exons2])
splice_event=combineAnnotations([le.AssociatedSplicingEvent(),re.AssociatedSplicingEvent()])
splice_junctions=combineAnnotations([le.AssociatedSplicingJunctions(),re.AssociatedSplicingJunctions()])
le_start_stop = [le.ExonStart(),le.ExonStop()]; le_start_stop.sort(); le_start,le_stop = le_start_stop
re_start_stop = [re.ExonStart(),re.ExonStop()]; re_start_stop.sort(); re_start,re_stop = re_start_stop
splice_event = getUCSCSplicingAnnotations(ucsc_events,splice_event,le_start,le_stop)
splice_event = getUCSCSplicingAnnotations(ucsc_events,splice_event,re_start,re_stop)
if len(splice_event)>0: const_call = 'no'
values = [gene,exon1+'-'+exon2,'chr'+le.Chr(),le.Strand(),str(le_start)+'|'+str(le_stop),str(re_start)+'|'+str(re_stop),const_call,ens_exons,splice_event,splice_junctions]
values = string.join(values,'\t')+'\n'; junctiondata.write(values)
#print exon1+'-'+exon2, le_start_stop,re_start_stop
if gene in excluded_intronic_junctions:
### Repeat for junctions that occur in AltAnalyze determined retained introns
for (le,re) in excluded_intronic_junctions[gene]:
if le.IntronDeletionStatus()== 'yes': exon1 = le.NewIntronRegion()
else: exon1 = le.ExonRegionID2()
if re.IntronDeletionStatus()== 'yes': exon2 = re.NewIntronRegion()
else: exon2 = re.ExonRegionID2()
ens_exons1 = le.ExonID()
ens_exons2 = re.ExonID()
const_call = 'no'
ens_exons=combineAnnotations([ens_exons1,ens_exons2])
splice_event=combineAnnotations([le.AssociatedSplicingEvent(),re.AssociatedSplicingEvent()])
splice_junctions=combineAnnotations([le.AssociatedSplicingJunctions(),re.AssociatedSplicingJunctions()])
le_start_stop = [le.ExonStart(),le.ExonStop()]; le_start_stop.sort(); le_start,le_stop = le_start_stop
re_start_stop = [re.ExonStart(),re.ExonStop()]; re_start_stop.sort(); re_start,re_stop = re_start_stop
splice_event = getUCSCSplicingAnnotations(ucsc_events,splice_event,le_start,le_stop)
splice_event = getUCSCSplicingAnnotations(ucsc_events,splice_event,re_start,re_stop)
values = [gene,exon1+'-'+exon2,'chr'+le.Chr(),le.Strand(),str(le_start)+'|'+str(le_stop),str(re_start)+'|'+str(re_stop),const_call,ens_exons,splice_event,splice_junctions]
values = string.join(values,'\t')+'\n'; junctiondata.write(values)
#print exon1, le_start_stop, exon2, re_start_stop
sgvjdata.close(); exondata.close(); junctiondata.close()
def combineAnnotations(annotation_list):
annotation_list2=[]
for annotations in annotation_list:
annotations = string.split(annotations,'|')
for annotation in annotations:
if len(annotation)>0: annotation_list2.append(annotation)
annotation_list = unique.unique(annotation_list2)
annotation_list = string.join(annotation_list,'|')
return annotation_list
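### Usage sketch: combineAnnotations(['cassette-exon|intron-retention','cassette-exon'])
### splits each entry on '|', drops empty strings, deduplicates via unique.unique and
### rejoins, yielding a single pipe-delimited string such as 'cassette-exon|intron-retention'.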
def getMatchingEnsExons(region_start,region_stop,exon_coord_db):
region_coord=[region_start,region_stop]; ens_exons=[]
region_coord.sort(); region_start,region_stop = region_coord
for exon_coord in exon_coord_db:
combined=region_coord+list(exon_coord)
combined.sort()
if region_start==combined[1] and region_stop==combined[-2]:
ens_exons.append(exon_coord_db[exon_coord])
ens_exons = string.join(ens_exons,'|')
return ens_exons
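### The combined-sort membership test above is equivalent to asking whether the exon
### fully spans the queried region (boundary ties included). A direct-comparison
### sketch, assuming each coordinate pair is already sorted low-to-high:
def _exonSpansRegion(region_start,region_stop,exon_start,exon_stop):
    return exon_start <= region_start and region_stop <= exon_stop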
################# Begin Analysis from parsing files
class EnsemblInformation:
def __init__(self, chr, gene_start, gene_stop, strand, ensembl_gene_id, ensembl_exon_id, exon_start, exon_stop, constitutive_exon, new_exon_start, new_exon_stop, new_gene_start, new_gene_stop):
self._geneid = ensembl_gene_id; self._exonid = ensembl_exon_id; self._chr = chr
self._genestart = gene_start; self._genestop = gene_stop
self._exonstart = exon_start; self._exonstop = exon_stop
self._constitutive_exon = constitutive_exon; self._strand = strand
self._newgenestart = new_gene_start; self._newgenestop = new_gene_stop
self._newexonstart = new_exon_start; self._newexonstop = new_exon_stop
self._del_status = 'no' #default value
self._distal_intron_region = 1 #default value
self.mx='no'
def GeneID(self): return self._geneid
def ExonID(self): return self._exonid
def reSetExonID(self,exonid): self._exonid = exonid
def Chr(self): return self._chr
def Strand(self): return self._strand
def GeneStart(self): return self._genestart
def GeneStop(self): return self._genestop
def ExonStart(self): return self._exonstart
def ExonStop(self): return self._exonstop
def NewGeneStart(self): return self._newgenestart
def NewGeneStop(self): return self._newgenestop
def NewExonStart(self): return self._newexonstart
def NewExonStop(self): return self._newexonstop
def setConstitutive(self,constitutive_exon): self._constitutive_exon = constitutive_exon
def Constitutive(self): return self._constitutive_exon
def ConstitutiveCall(self):
if self.Constitutive() == '1': call = 'yes'
else: call = 'no'
return call
def ConstitutiveCallAbrev(self):
if self.Constitutive() == 'yes': call = 'yes'
elif self.Constitutive() == '1': call = 'yes'
else: call = 'no'
return call
def setSpliceData(self,splice_event,splice_junctions):
self._splice_event = splice_event; self._splice_junctions = splice_junctions
def setMutuallyExclusive(self): self.mx='yes'
def setIntronDeletionStatus(self,del_status):
self._del_status = del_status
def setAssociatedSplicingEvent(self,splice_event): self._splice_event = splice_event
def AssociatedSplicingEvent(self):
if self.mx == 'yes':
try:
self._splice_event = string.replace(self._splice_event,'cassette-exon','mutually-exclusive-exon')
self._splice_event = string.join(unique.unique(string.split(self._splice_event,'|')),'|')
except AttributeError: return 'mutually-exclusive-exon'
try: return self._splice_event
except AttributeError: return ''
def updateDistalIntronRegion(self): self._distal_intron_region+=1
def DistalIntronRegion(self): return self._distal_intron_region
def setNewIntronRegion(self,new_intron_region): self.new_intron_region = new_intron_region
def NewIntronRegion(self): return self.new_intron_region
def IntronDeletionStatus(self):
try: return self._del_status
except AttributeError: return 'no'
def setAssociatedSplicingJunctions(self,splice_junctions): self._splice_junctions = splice_junctions
def AssociatedSplicingJunctions(self):
try: return self._splice_junctions
except AttributeError: return ''
def setExonRegionIDs(self,id): self._exon_region_ids = id
def ExonRegionIDs(self): return self._exon_region_ids
def setJunctionCoordinates(self,start1,stop1,start2,stop2):
self.start1=start1;self.stop1=stop1;self.start2=start2;self.stop2=stop2
def JunctionCoordinates(self): return [self.start1,self.stop1,self.start2,self.stop2]
def JunctionDistance(self):
jc = self.JunctionCoordinates(); jc.sort(); distance = int(jc[2])-int(jc[1])
return distance
def ExonNumber(self): return self._exon_num
def RegionNumber(self): return self._region_num
def ExonRegionNumbers(self): return (self._exon_num,self._region_num)
def ExonRegionID(self): return 'E'+str(self._exon_num)+'-'+str(self._region_num)
def ExonRegionID2(self): return 'E'+str(self._exon_num)+'.'+str(self._region_num)
def IntronRegionID(self): return 'I'+str(self._exon_num)+'.1'
def setExonSeq(self,exon_seq): self._exon_seq = exon_seq
def ExonSeq(self): return self._exon_seq
def setPrevExonSeq(self,prev_exon_seq): self._prev_exon_seq = prev_exon_seq
def PrevExonSeq(self):
try: return self._prev_exon_seq
except Exception: return ''
def setNextExonSeq(self,next_exon_seq): self._next_exon_seq = next_exon_seq
def NextExonSeq(self):
try: return self._next_exon_seq
except Exception: return ''
def setPrevIntronSeq(self,prev_intron_seq): self._prev_intron_seq = prev_intron_seq
def PrevIntronSeq(self): return self._prev_intron_seq
def setNextIntronSeq(self,next_intron_seq): self._next_intron_seq = next_intron_seq
def NextIntronSeq(self): return self._next_intron_seq
def setPromoterSeq(self,promoter_seq): self._promoter_seq = promoter_seq
def PromoterSeq(self): return self._promoter_seq
def AllGeneValues(self):
output = str(self.ExonID())
return output
def __repr__(self): return self.AllGeneValues()
class ExonStructureData(EnsemblInformation):
def __init__(self, ensembl_gene_id, chr, strand, exon_start, exon_stop, constitutive_exon, ensembl_exon_id, transcriptid):
self._transcriptid = transcriptid
self._geneid = ensembl_gene_id; self._exonid = ensembl_exon_id; self._chr = chr
self._exonstart = exon_start; self._exonstop = exon_stop
self._constitutive_exon = constitutive_exon; self._strand = strand
self._distal_intron_region = 1; self.mx='no'
def TranscriptID(self): return self._transcriptid
class ExonRegionData(EnsemblInformation):
def __init__(self, ensembl_gene_id, chr, strand, exon_start, exon_stop, ensembl_exon_id, exon_region_id, exon_num, region_num, constitutive_exon):
self._exon_region_id = exon_region_id; self._constitutive_exon = constitutive_exon
self._geneid = ensembl_gene_id; self._exonid = ensembl_exon_id; self._chr = chr
self._exonstart = exon_start; self._exonstop = exon_stop; self._distal_intron_region = 1; self.mx='no'
self._exon_num = exon_num; self._region_num = region_num; self._strand = strand
class ProbesetAnnotation(EnsemblInformation):
def __init__(self, ensembl_exon_id, constitutive_exon, exon_region_id, splice_event, splice_junctions, exon_start, exon_stop):
self._region_num = exon_region_id; self._constitutive_exon = constitutive_exon;self._splice_event = splice_event
self._splice_junctions = splice_junctions; self._exonid = ensembl_exon_id
self._exonstart = exon_start; self._exonstop = exon_stop; self.mx='no'
class ExonAnnotationsSimple(EnsemblInformation):
def __init__(self, chr, strand, exon_start, exon_stop, ensembl_gene_id, ensembl_exon_id ,constitutive_exon, exon_region_id, splice_event, splice_junctions):
self._exon_region_ids = exon_region_id; self._constitutive_exon = constitutive_exon;self._splice_event =splice_event
self._splice_junctions = splice_junctions; self._exonid = ensembl_exon_id; self._geneid = ensembl_gene_id
self._chr = chr; self._exonstart = exon_start; self._exonstop = exon_stop; self._strand = strand; self.mx='no'
class CriticalExonInfo:
def __init__(self,geneid,critical_exon,splice_type,junctions):
self._geneid = geneid; self._junctions = junctions
self._critical_exon = critical_exon; self._splice_type = splice_type
def GeneID(self): return self._geneid
def Junctions(self): return self._junctions
def CriticalExonRegion(self): return self._critical_exon
def SpliceType(self): return self._splice_type
class RelativeExonLocations:
def __init__(self,exonid,pes,pee,nes,nee):
self._exonid = exonid; self._pes = pes; self._pee = pee
self._nes = nes; self._nee = nee
def ExonID(self): return self._exonid
def PrevExonCoor(self): return (self._pes,self._pee)
def NextExonCoor(self): return (self._nes,self._nee)
def __repr__(self): return self.ExonID()
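### RelativeExonLocations records a transcript-relative neighborhood for one exon:
### PrevExonCoor()/NextExonCoor() return the flanking exons' (start,end) pairs, with
### (-1,-1) sentinels when no neighbor exists (first or last exon of the transcript).
### Usage sketch (hypothetical accession and coordinates):
### rel = RelativeExonLocations('ENSE00000000001',100,200,500,600)
### rel.PrevExonCoor() -> (100,200); rel.NextExonCoor() -> (500,600)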
################### Import exon coordinate/transcript data from BIOMART
def importEnsExonStructureDataSimple(species,type,gene_strand_db,exon_location_db,adjacent_exon_locations):
if type == 'ensembl': filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
elif type == 'ucsc': filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_filtered_mrna.txt'
elif type == 'ncRNA': filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_filtered_ncRNA.txt'
start_time = time.time()
fn=filepath(filename); x=0; k=[]; relative_exon_locations={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if 'Chromosome' in t[0]: type = 'old'###when using older builds of EnsMart versus BioMart
else: type = 'current'
x=1
else:
if type == 'old': chr, strand, gene, ens_transcriptid, ens_exonid, exon_start, exon_end, constitutive_exon = t
else: gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
###switch exon-start and stop if in the reverse orientation
if strand == '-1' or strand == '-': strand = '-'#; exon_start2 = int(exon_end); exon_end2 = int(exon_start); exon_start=exon_start2; exon_end=exon_end2
else: strand = '+'; exon_end = int(exon_end)#; exon_start = int(exon_start)
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
exon_end = int(exon_end); exon_start = int(exon_start)
exon_data = (exon_start,exon_end,ens_exonid)
try: relative_exon_locations[ens_transcriptid,gene,strand].append(exon_data)
except KeyError: relative_exon_locations[ens_transcriptid,gene,strand] = [exon_data]
gene_strand_db[gene] = strand
exon_location_db[ens_exonid] = exon_start,exon_end
###Generate a list of exon positions for adjacent exons for all exons
first_exon_dbase={}
for (transcript,gene,strand) in relative_exon_locations:
relative_exon_locations[(transcript,gene,strand)].sort()
if strand == '-': relative_exon_locations[(transcript,gene,strand)].reverse()
i = 0
ex_ls = relative_exon_locations[(transcript,gene,strand)]
for exon_data in ex_ls:
exonid = exon_data[-1]
if i == 0: ### first exon
pes = -1; pee = -1 ###Thus, Index should be out of range, but since -1 is valid, it won't be
if strand == '-': ces = ex_ls[i][1]
else: ces = ex_ls[i][0]
try: first_exon_dbase[gene].append([ces,exonid])
except KeyError: first_exon_dbase[gene] = [[ces,exonid]]
else: pes = ex_ls[i-1][0]; pee = ex_ls[i-1][1] ###pes: previous exon start, pee: previous exon end
try: nes = ex_ls[i+1][0]; nee = ex_ls[i+1][1]
except IndexError: nes = -1; nee = -1
rel = RelativeExonLocations(exonid,pes,pee,nes,nee)
"""if exonid in adjacent_exon_locations:
rel1 = adjacent_exon_locations[exonid]
prev_exon_start,prev_exon_stop = rel1.NextExonCoor()
next_exon_start,next_exon_stop = rel1.PrevExonCoor()
if prev_exon_start == -1 or next_exon_start == -1:
adjacent_exon_locations[exonid] = rel ###Don't override the existing entry if no exon is preceding or following
else: adjacent_exon_locations[exonid] = rel"""
adjacent_exon_locations[exonid] = rel
i+=1
for gene in first_exon_dbase:
first_exon_dbase[gene].sort()
strand = gene_strand_db[gene]
if strand == '-': first_exon_dbase[gene].reverse()
first_exon_dbase[gene] = first_exon_dbase[gene][0][1] ### select the most 5' of the start exons for the gene
#print relative_exon_locations['ENSMUST00000025142','ENSMUSG00000024293','-']; kill
end_time = time.time(); time_diff = int(end_time-start_time)
print filename,"parsed in %d seconds" % time_diff
print len(gene_strand_db),'genes imported'
return gene_strand_db,exon_location_db,adjacent_exon_locations,first_exon_dbase
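### Expected 'current' row layout for the transcript-annotations file parsed above
### (tab-delimited; the values shown are hypothetical):
### gene             chr   strand  exon_start  exon_end  ens_exonid       constitutive  ens_transcriptid
### ENSG00000000001  chrX  -1      99883667    99884983  ENSE00000000001  1             ENST00000000001
### Older EnsMart exports (detected by 'Chromosome' in the header row) use the different
### column order handled by the 'old' branch above.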
def importEnsExonStructureDataSimpler(species,type,relative_exon_locations):
if type == 'ensembl': filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
elif type == 'ucsc': filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_COMPLETE-mrna.txt'
start_time = time.time()
fn=filepath(filename); x=0; k=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if 'Chromosome' in t[0]: type = 'old'###when using older builds of EnsMart versus BioMart
else: type = 'current'
x=1
else:
if type == 'old': chr, strand, gene, ens_transcriptid, ens_exonid, exon_start, exon_end, constitutive_exon = t
else: gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
#if gene == 'ENSG00000143776'
###switch exon-start and stop if in the reverse orientation
if strand == '-1' or strand == '-': strand = '-'#; exon_start2 = int(exon_end); exon_end2 = int(exon_start); exon_start=exon_start2; exon_end=exon_end2
else: strand = '+'; exon_end = int(exon_end)#; exon_start = int(exon_start)
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
exon_end = int(exon_end); exon_start = int(exon_start)
exon_data = (exon_start,exon_end,ens_exonid)
try: relative_exon_locations[ens_transcriptid,gene,strand].append(exon_data)
except KeyError: relative_exon_locations[ens_transcriptid,gene,strand] = [exon_data]
return relative_exon_locations
def importEnsGeneData(species):
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
global ens_gene_db; ens_gene_db={}
importEnsExonStructureData(filename,species,'gene')
return ens_gene_db
def importEnsExonStructureData(filename,species,data2process):
start_time = time.time()
fn=filepath(filename); x=0; k=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if data2process == 'all':
###switch exon-start and stop if in the reverse orientation
if strand == '-1' or strand == '-': strand = '-'; exon_start2 = int(exon_end); exon_end2 = int(exon_start); exon_start=exon_start2; exon_end=exon_end2
else: strand = '+'; exon_end = int(exon_end); exon_start = int(exon_start)
if 'ENS' in gene: ens_exonid_data = string.split(ens_exonid,'.'); ens_exonid = ens_exonid_data[0]
if abs(exon_end-exon_start)>-1:
if abs(exon_end-exon_start)<1:
try: too_short[gene].append(exon_start)
except KeyError: too_short[gene] = [exon_start]
exon_coordinates = [exon_start,exon_end]
continue_analysis = 'no'
if test=='yes': ###used to test the program for a single gene
if gene in test_gene: continue_analysis='yes'
else: continue_analysis='yes'
if continue_analysis=='yes':
###Create temporary databases storing just exon and just transcript data and then combine in the next block of code
initial_exon_annotation_db[ens_exonid] = gene,chr,strand,exon_start,exon_end,constitutive_exon
try: exon_transcript_db[ens_exonid].append(ens_transcriptid)
except KeyError: exon_transcript_db[ens_exonid] = [ens_transcriptid]
###Use this database to figure out which ensembl exons represent intron retention as a special case down-stream
try: transcript_exon_db[gene,chr,strand,ens_transcriptid].append(exon_coordinates)
except KeyError: transcript_exon_db[gene,chr,strand,ens_transcriptid] = [exon_coordinates]
###Store transcript data for downstream analyses
transcript_gene_db[ens_transcriptid] = gene,chr,strand
try: gene_transcript[gene].append(ens_transcriptid)
except KeyError: gene_transcript[gene] = [ens_transcriptid]
#print ens_exonid, exon_end
elif data2process == 'exon-transcript':
continue_analysis = 'yes'
if test=='yes': ###used to test the program for a single gene
if gene not in test_gene: continue_analysis='no'
if continue_analysis == 'yes':
try: exon_transcript_db[ens_exonid].append(ens_transcriptid)
except KeyError: exon_transcript_db[ens_exonid] = [ens_transcriptid]
elif data2process == 'gene': ens_gene_db[gene] = chr,strand
end_time = time.time(); time_diff = int(end_time-start_time)
try: print len(transcript_gene_db), "number of transcripts included"
except Exception: null=[]
print filename,"parsed in %d seconds" % time_diff
def getEnsExonStructureData(species,data_type):
start_time = time.time()
###Simple function to import and organize exon/transcript data
filename1 = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
filename2 = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_filtered_mrna.txt'
filename3 = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_filtered_ncRNA.txt'
data2process = 'all'
global initial_exon_annotation_db; initial_exon_annotation_db={}
global exon_transcript_db; exon_transcript_db={}
global transcript_gene_db; transcript_gene_db={}
global gene_transcript; gene_transcript={}
global transcript_exon_db; transcript_exon_db={}
global initial_junction_db; initial_junction_db = {}; global too_short; too_short={}
global ensembl_annotations; ensembl_annotations={}; global ensembl_gene_coordinates; ensembl_gene_coordinates = {}
if data_type == 'mRNA' or data_type == 'gene':
importEnsExonStructureData(filename2,species,data2process)
importEnsExonStructureData(filename1,species,data2process)
elif data_type == 'ncRNA': ###Builds database based on a mix of Ensembl, GenBank and UID ncRNA IDs
importEnsExonStructureData(filename3,species,data2process)
global exon_annotation_db; exon_annotation_db={} ###Agglomerate the data from above and store as an instance of the ExonStructureData class
for ens_exonid in initial_exon_annotation_db:
gene,chr,strand,exon_start,exon_end,constitutive_exon = initial_exon_annotation_db[ens_exonid]
ens_transcript_list = exon_transcript_db[ens_exonid]
###Record the coordinates for matching up UCSC exon annotations with Ensembl genes
try: ensembl_gene_coordinates[gene].append(exon_start)
except KeyError: ensembl_gene_coordinates[gene] = [exon_start]
ensembl_gene_coordinates[gene].append(exon_end)
ensembl_annotations[gene] = chr,strand
y = ExonStructureData(gene, chr, strand, exon_start, exon_end, constitutive_exon, ens_exonid, ens_transcript_list)
exon_info = [exon_start,exon_end,y]
if gene in too_short: too_short_list = too_short[gene]
else: too_short_list=[]
if exon_start in too_short_list:# and exon_end in too_short_list: pass ###Ensembl exons that are one bp (start and stop the same) - fixed minor issue in 2.0.9
pass
else:
try: exon_annotation_db[(gene,chr,strand)].append(exon_info)
except KeyError: exon_annotation_db[(gene,chr,strand)] = [exon_info]
initial_exon_annotation_db={}; exon_transcript_db={}
print 'Exon and transcript data obtained for %d genes' % len(exon_annotation_db)
###Grab the junction data
for (gene,chr,strand,transcript) in transcript_exon_db:
exon_list_data = transcript_exon_db[(gene,chr,strand,transcript)];exon_list_data.sort()
if strand == '-': exon_list_data.reverse()
index=0
if gene in too_short: too_short_list = too_short[gene]
else: too_short_list=[]
while index+1 <len(exon_list_data):
junction_data = exon_list_data[index],exon_list_data[index+1]
if junction_data[0][0] not in too_short_list and junction_data[0][1] not in too_short_list and junction_data[1][0] not in too_short_list and junction_data[1][1] not in too_short_list: ###Ensembl exons that are one bp (start and stop the same)
try: initial_junction_db[(gene,chr,strand)].append(junction_data)
except KeyError: initial_junction_db[(gene,chr,strand)] = [junction_data]
index+=1
print 'Exon-junction data obtained for %d genes' % len(initial_junction_db)
###Find exons that suggest intron retention: Within a junction database, find exons that overlap with junction boundaries
intron_retention_db={}; retained_intron_exons={}; delete_db={}
for key in initial_junction_db:
for junction_info in initial_junction_db[key]:
e5_pos = junction_info[0][1]; e3_pos = junction_info[1][0] ###grab the junction coordinates
for exon_info in exon_annotation_db[key]:
exon_start,exon_stop,ed = exon_info
loc = [e5_pos,e3_pos]; loc.sort() ###The downstream functions need these two sorted
new_exon_info = loc[0],loc[1],ed
retained = compareExonLocations(e5_pos,e3_pos,exon_start,exon_stop)
exonid = ed.ExonID()
if retained == 'yes':
#print exonid,e5_pos,e3_pos,exon_start,exon_stop
intron_length = abs(e5_pos-e3_pos)
if intron_length>500:
ed.setIntronDeletionStatus('yes')
try: delete_db[key].append(exon_info)
except KeyError: delete_db[key] = [exon_info]
try: intron_retention_db[key].append(new_exon_info)
except KeyError: intron_retention_db[key]=[new_exon_info]
retained_intron_exons[exonid]=[]
#print key,ed.ExonID(),len(exon_annotation_db[key])
k=0 ### Below code removes exons that have been classified as retained introns - not needed when we can selectively remove these exons with ed.IntronDeletionStatus()
#exon_annotation_db2={}
for key in exon_annotation_db:
for exon_info in exon_annotation_db[key]:
if key in delete_db:
delete_info = delete_db[key] ### coordinates and objects... looks like you can match up based on object memory locations
if exon_info in delete_info: k+=1
"""else:
try: exon_annotation_db2[key].append(exon_info)
except KeyError: exon_annotation_db2[key]=[exon_info]"""
"""else:
try: exon_annotation_db2[key].append(exon_info)
except KeyError: exon_annotation_db2[key]=[exon_info]"""
#exon_annotation_db = exon_annotation_db2; exon_annotation_db2=[]
transcript_exon_db=[]
print k, 'exon entries removed from primary exon structure, which occur in predicted retained introns'
initial_junction_db={}
print len(retained_intron_exons),"Ensembl exons in %d genes show evidence of being retained introns (only sequences > 500bp are removed from the database)" % len(intron_retention_db)
end_time = time.time(); time_diff = int(end_time-start_time)
print "Primary databases built in %d seconds" % time_diff
ucsc_splicing_annot_db = alignToKnownAlt.importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db) ### Should be able to exclude
#except Exception: ucsc_splicing_annot_db={}
return exon_annotation_db,transcript_gene_db,gene_transcript,intron_retention_db,ucsc_splicing_annot_db
def compareExonLocations(e5_pos,e3_pos,exon_start,exon_stop):
sort_list = [e5_pos,e3_pos,exon_start,exon_stop]; sort_list.sort()
new_sort = sort_list[1:-1]
if e5_pos in new_sort and e3_pos in new_sort: retained = 'yes'
else: retained = 'no'
return retained
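### A minimal usage sketch of compareExonLocations: a junction spanning 1000-2000 that
### falls inside an exon covering 900-2100 flags that exon as retaining the intron:
def _compareExonLocationsExample():
    assert compareExonLocations(1000,2000,900,2100) == 'yes' ### exon spans the junction
    assert compareExonLocations(1000,2000,1500,2100) == 'no' ### exon starts inside it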
################### Import exon sequence data from BIOMART (more flexible alternative function to above)
def getSeqLocations(sequence,ed,strand,start,stop,original_start,original_stop):
cd = seqSearch(sequence,ed.ExonSeq())
if cd != -1:
if strand == '-': exon_stop = stop - cd; exon_start = exon_stop - len(ed.ExonSeq()) + 1
else: exon_start = start + cd; exon_stop = exon_start + len(ed.ExonSeq())-1
ed.setExonStart(exon_start); ed.setExonStop(exon_stop); ed.setGeneStart(original_start); ed.setGeneStop(original_stop)
#if ed.ExonID() == 'E10' and ed.ArrayGeneID() == 'G7225860':
#print exon_start, exon_stop,len(ed.ExonSeq()),ed.ExonSeq();kill
else:
cd = seqSearch(sequence,ed.ExonSeq()[:15])
#print ed.ExonSeq()[:15],ed.ExonSeq();kill
if cd == -1: cd = seqSearch(sequence,ed.ExonSeq()[-15:])
if cd != -1:
if strand == '-': exon_stop = stop - cd; exon_start = exon_stop - len(ed.ExonSeq()) + 1
else: exon_start = start + cd; exon_stop = exon_start + len(ed.ExonSeq())-1
ed.setExonStart(exon_start); ed.setExonStop(exon_stop); ed.setGeneStart(original_start); ed.setGeneStop(original_stop)
else:
ed.setGeneStart(original_start); ed.setGeneStop(original_stop) ### set these at a minimum for HTA arrays so that the pre-set exon coordinates are reported
return ed
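### seqSearch (defined elsewhere in this module) presumably behaves like string.find,
### returning the 0-based offset of the exon sequence within the retrieved gene-region
### sequence or -1 when absent -- hence the fallbacks above that retry with only the
### first or last 15 bases when the full-length exon sequence cannot be located.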
def import_sequence_data(filename,filter_db,species,analysis_type):
"""Note: A current bug in this module is that the last gene is not analyzed"""
print "Begining generic fasta import of",filename
fn=filepath(filename);fasta = {}; exon_db = {}; gene_db = {}; cDNA_db = {};sequence = '';count = 0; seq_assigned=0
global temp_seq; temp_seq=''; damned =0; global failed; failed={}
addition_seq_len = 2000; var = 1000
if len(analysis_type)==2: analysis_parameter,analysis_type = analysis_type
else: analysis_parameter = 'null'
if 'gene' in fn or 'chromosome' in fn:
gene_strand_db,exon_location_db,adjacent_exon_locations,null = importEnsExonStructureDataSimple(species,'ucsc',{},{},{})
gene_strand_db,exon_location_db,adjacent_exon_locations,first_exon_db = importEnsExonStructureDataSimple(species,'ensembl',gene_strand_db,exon_location_db,adjacent_exon_locations)
null=[]
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
try:
if data[0] == '>':
if len(sequence) > 0:
if 'gene' in fn or 'chromosome' in fn:
start = int(start); stop = int(stop)
else:
try: exon_start = int(exon_start); exon_stop = int(exon_stop)
except ValueError: exon_start = exon_start
if strand == '-1': strand = '-'
else: strand = '+'
if strand == '-': new_exon_start = exon_stop; new_exon_stop = exon_start; exon_start = new_exon_start; exon_stop = new_exon_stop
if 'exon' in fn:
exon_info = [exon_start,exon_stop,exon_id,exon_annot]
try: exon_db[(gene,chr,strand)].append(exon_info)
except KeyError: exon_db[(gene,chr,strand)] = [exon_info] #exon_info = (exon_start,exon_stop,exon_id,exon_annot)
fasta[geneid]=description
if 'cDNA' in fn:
cDNA_info = [transid,sequence]
try: cDNA_db[(gene,strand)].append(cDNA_info)
except KeyError: cDNA_db[(gene,strand)] = [cDNA_info]
if 'gene' in fn or 'chromosome' in fn:
temp_seq = sequence
if analysis_type == 'gene_count': fasta[gene]=[]
if gene in filter_db:
count += 1
if count == var: print var,'genes examined...'; var+=1000
#print gene, filter_db[gene][0].ArrayGeneID();kill
if (len(sequence) -(stop-start)) != ((addition_seq_len*2) +1):
###multiple issues can occur with trying to grab sequence up and downstream - for now, exclude these
###some can be accounted for by being at the end of a chromosome, but not all.
damned +=1
try: failed[chr]+=1
except KeyError: failed[chr]=1
"""if chr in failed:
gene_len = (stop-start); new_start = start - addition_seq_len
null = sequence[(gene_len+2000):]"""
else:
original_start = start; original_stop = stop
start = start - addition_seq_len
stop = stop + addition_seq_len
strand = gene_strand_db[gene]
first_exonid = first_exon_db[gene]
fexon_start,fexon_stop = exon_location_db[first_exonid]
for ed in filter_db[gene]:
if analysis_type == 'get_locations':
ed = getSeqLocations(sequence,ed,strand,start,stop,original_start,original_stop)
if analysis_type == 'get_sequence':
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = ed
if analysis_parameter == 'region_only': ens_exon_list = [exon_id]
else: ens_exon_list = ed.ExonID()
for ens_exon in ens_exon_list:
if len(ens_exon)>0:
if analysis_parameter == 'region_only':
### Only extract the specific region exon sequence
exon_start,exon_stop = int(probe_start),int(probe_stop)
#if ':440657' in probeset_id: print ens_exon_list,probe_start,probe_stop
#probe_coord = [int(probe_start),int(probe_stop)]; probe_coord.sort()
#exon_start,exon_stop = probe_coord
else: exon_start,exon_stop = exon_location_db[ens_exon]
#if ':440657' in probeset_id: print [exon_start,exon_stop]
exon_sequence = grabSeq(sequence,strand,start,stop,exon_start,exon_stop,'exon')
#print [exon_id,exon_sequence, start,stop,exon_start,exon_stop];kill
"""Could repeat if we build another dictionary with exon->adjacent exon positions (store in a class where
you designate last and next exon posiitons for each transcript relative to that exon), to grab downstream, upsteam exon and intron sequences"""
try:
rel = adjacent_exon_locations[ens_exon]
prev_exon_start,prev_exon_stop = rel.PrevExonCoor()
next_exon_start,next_exon_stop = rel.NextExonCoor()
prev_exon_sequence = grabSeq(sequence,strand,start,stop,prev_exon_start,prev_exon_stop,'exon')
next_exon_sequence = grabSeq(sequence,strand,start,stop,next_exon_start,next_exon_stop,'exon')
seq_type = 'intron'
if strand == '-':
if 'alt-N-term' in ed.AssociatedSplicingEvent() or 'altPromoter' in ed.AssociatedSplicingEvent(): seq_type = 'promoter' ### Thus prev_intron_seq is used to designate an alternative promoter sequence
prev_intron_sequence = grabSeq(sequence,strand,start,stop,exon_stop,prev_exon_start,seq_type)
promoter_sequence = grabSeq(sequence,strand,start,stop,fexon_stop,-1,"promoter")
next_intron_sequence = grabSeq(sequence,strand,start,stop,next_exon_stop,exon_start,'intron')
else:
prev_intron_sequence = grabSeq(sequence,strand,start,stop,prev_exon_stop,exon_start,seq_type)
promoter_sequence = grabSeq(sequence,strand,start,stop,-1,fexon_start,"promoter")
next_intron_sequence = grabSeq(sequence,strand,start,stop,exon_stop,next_exon_start,'intron')
"""if 'ENS' in ens_exon:
print ens_exon, strand
print '1)',exon_sequence
print '2)',prev_intron_sequence[:20],prev_intron_sequence[-20:], len(prev_intron_sequence), strand,start,stop,prev_exon_stop,exon_start,seq_type, ed.AssociatedSplicingEvent()
print '3)',next_intron_sequence[:20],next_intron_sequence[-20:], len(next_intron_sequence)
print '4)',promoter_sequence[:20],promoter_sequence[-20:], len(promoter_sequence);kill"""
###Intron sequences can be extremely long so just include the first and last 1kb
if len(prev_intron_sequence)>2001 and seq_type == 'promoter': prev_intron_sequence = prev_intron_sequence[-2001:]
elif len(prev_intron_sequence)>2001: prev_intron_sequence = prev_intron_sequence[:1000]+'|'+prev_intron_sequence[-1000:]
if len(next_intron_sequence)>2001: next_intron_sequence = next_intron_sequence[:1000]+'|'+next_intron_sequence[-1000:]
if len(promoter_sequence)>2001: promoter_sequence = promoter_sequence[-2001:]
except Exception:
### When analysis_parameter == 'region_only' an exception is desired since we only want exon_sequence
prev_exon_sequence=''; next_intron_sequence=''; next_exon_sequence=''; promoter_sequence=''
if analysis_parameter != 'region_only':
if strand == '-': promoter_sequence = grabSeq(sequence,strand,start,stop,fexon_stop,-1,"promoter")
else: promoter_sequence = grabSeq(sequence,strand,start,stop,-1,fexon_start,"promoter")
if len(promoter_sequence)>2001: promoter_sequence = promoter_sequence[-2001:]
ed.setExonSeq(exon_sequence) ### Use to replace the previous probeset/critical exon sequence with sequence corresponding to the full exon
seq_assigned+=1
if len(prev_exon_sequence)>0:
### Use to output sequence for ESE/ISE type motif searches
ed.setPrevExonSeq(prev_exon_sequence);
if len(next_exon_sequence)>0: ed.setNextExonSeq(next_exon_sequence)
if analysis_parameter != 'region_only':
ed.setPrevIntronSeq(prev_intron_sequence[1:-1]); ed.setNextIntronSeq(next_intron_sequence[1:-1])
ed.setPromoterSeq(promoter_sequence[1:-1])
else:
if strand == '-': promoter_sequence = grabSeq(sequence,strand,start,stop,fexon_stop,-1,"promoter")
else: promoter_sequence = grabSeq(sequence,strand,start,stop,-1,fexon_start,"promoter")
if len(promoter_sequence)>2001: promoter_sequence = promoter_sequence[-2001:]
ed.setPromoterSeq(promoter_sequence[1:-1])
sequence = ''; data2 = data[1:]; t= string.split(data2,'|'); gene,chr,start,stop = t
else:
data2 = data[1:]; t= string.split(data2,'|'); gene,chr,start,stop = t
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
if analysis_type == 'get_locations': ### Applies to the last gene sequence read
if ('gene' in fn or 'chromosome' in fn) and len(sequence) > 0:
start = int(start); stop = int(stop)
if analysis_type == 'gene_count': fasta[gene]=[]
if gene in filter_db:
original_start = start; original_stop = stop
start = start - addition_seq_len
stop = stop + addition_seq_len
strand = gene_strand_db[gene]
first_exonid = first_exon_db[gene]
fexon_start,fexon_stop = exon_location_db[first_exonid]
for ed in filter_db[gene]:
try: print ed.ExonStart(),'1'
except Exception: null=[]
ed = getSeqLocations(sequence,ed,strand,start,stop,original_start,original_stop)
for ed in filter_db[gene]:
print ed.ExonStart(),'2'
probesets_analyzed=0
for gene in filter_db:
for probe_data in filter_db[gene]: probesets_analyzed+=1
print "Number of imported sequences:", len(fasta),count
print "Number of assigned probeset sequences:",seq_assigned,"out of",probesets_analyzed
if len(exon_db) > 0: return exon_db,fasta
elif len(cDNA_db) > 0: return cDNA_db
elif len(fasta) > 0: return fasta
else: return filter_db
def grabSeq(sequence,strand,start,stop,exon_start,exon_stop,type):
proceed = 'yes'
if type != 'promoter' and (exon_start == -1 or exon_stop == -1): proceed = 'no' ###Thus no preceding or succeeding exons and thus no sequence reported
if proceed == 'yes':
if strand == '-':
if exon_stop == -1: exon_stop = stop
exon_sequence = sequence[(stop-exon_stop):(stop-exon_stop)+(exon_stop-exon_start)+1]
else:
#print type, exon_start,start,exon_stop
if exon_start == -1: exon_start = start ### For last intron
exon_sequence = sequence[(exon_start-start):(exon_stop-start+1)]
else: exon_sequence = ''
return exon_sequence
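### Worked example (hypothetical values): with a '+' strand gene slice beginning at
### start=1000, an exon at 1050..1099 is returned as sequence[50:100], i.e.
### sequence[(exon_start-start):(exon_stop-start+1)], an inclusive 50 nt slice. On the
### '-' strand the offsets are measured from stop instead, since the stored slice runs
### from the gene's downstream end.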
def seqSearch(sequence,exon_seq):
cd = string.find(sequence,exon_seq)
if cd == -1:
rev_seq = reverse_orientation(exon_seq); cd = string.find(sequence,rev_seq)
return cd
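### Illustrative example (hypothetical sequences): seqSearch('AAGGTTCC','GGTT') returns 2;
### seqSearch('AAGGTTCC','AACC') also returns 2 because 'AACC' is absent on the given
### strand but its reverse complement 'GGTT' is found; -1 indicates no match either way.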
def reverse_string(astring):
"http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65225"
revchars = list(astring) # string -> list of chars
revchars.reverse() # inplace reverse the list
revchars = ''.join(revchars) # list of strings -> string
return revchars
def reverse_orientation(sequence):
"""reverse the orientation of a sequence (opposite strand)"""
exchange = []
for nucleotide in sequence:
if nucleotide == 'A': nucleotide = 'T'
elif nucleotide == 'T': nucleotide = 'A'
elif nucleotide == 'G': nucleotide = 'C'
elif nucleotide == 'C': nucleotide = 'G'
exchange.append(nucleotide)
complementary_sequence = reverse_string(exchange)
return complementary_sequence
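### Example: reverse_orientation('ATGC') complements each base to ['T','A','C','G'] and
### reverses the result, returning 'GCAT'; bases other than A/T/G/C pass through unchanged.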
############# First pass for annotating exons into distinct, ordered regions for further annotation
def annotate_exons(exon_location):
print "Begining to assign intial exon block and region annotations"
### Sort and reverse exon orientation for transcript_cluster exons
original_neg_strand_coord={}
###make negative strand coordinates look like positive strand to identify overlapping exons
for (geneid,chr,strand) in exon_location:
exon_location[(geneid,chr,strand)].sort()
if strand == '-':
exon_location[(geneid,chr,strand)].reverse()
denominator = exon_location[(geneid,chr,strand)][0][0] ###numerical coordinates to subtract from to normalize negative strand data
for exon_info in exon_location[(geneid,chr,strand)]:
start,stop,ed = exon_info; ens_exon = ed.ExonID()
coordinates = stop,start; coordinates = copy.deepcopy(coordinates)###format these in reverse for analysis in other modules
original_neg_strand_coord[ens_exon] = coordinates
exon_info[0] = abs(start - denominator);exon_info[1] = abs(stop - denominator)
#alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','x','y','z']
exon_location2={}; exon_temp_list=[]
for key in exon_location:
index = 1; index2 = 1; exon_list=[]; y = 0 #if key[-1] == '-':
for exon in exon_location[key]: #print exon[0],exon[1],len(exon_temp_list)
if exon[-1].IntronDeletionStatus() == 'yes': null=[] ### retained intron (don't include)
else:
if y == 0:
exon_info = ['E'+str(index)+'-1',exon,(index,1)]
exon_list.append(exon_info); y = 1; last_start = exon[0]; last_stop = exon[1]; index += 1; index2 = 2; exon_temp_list =[]; exon_temp_list.append(last_start); exon_temp_list.append(last_stop)
elif y == 1:
current_start = exon[0]; current_stop = exon[1]
if ((current_start >= last_start) and (current_start <= last_stop)) or ((last_start >= current_start) and (last_start <= current_stop)):
exon_info = ['E'+str(index-1) +'-'+ str(index2),exon,(index-1,index2)] #+alphabet[index2]
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index2 += 1; exon_temp_list.append(current_start); exon_temp_list.append(current_stop)
elif (abs(current_start - last_stop) < 1) or (abs(last_start - current_stop) < 1):
exon_info = ['E'+str(index-1) +'-'+ str(index2),exon,(index-1,index2)] #+alphabet[index2]
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index2 += 1; exon_temp_list.append(current_start); exon_temp_list.append(current_stop)
elif len(exon_temp_list)>3:
exon_temp_list.sort()
if (((current_start-1) > exon_temp_list[-1]) and ((current_stop-1) > exon_temp_list[-1])) or (((current_start+1) < exon_temp_list[0]) and ((current_stop+1) < exon_temp_list[0])):
###Thus an overlap with at least one exon does NOT exist
exon_info = ['E'+str(index)+'-1',exon,(index,1)]
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index += 1; index2 = 2; exon_temp_list=[]; exon_temp_list.append(current_start); exon_temp_list.append(current_stop)
else:
exon_info = ['E'+str(index-1) +'-'+ str(index2),exon,(index-1,index2)] #+alphabet[index2]
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index2 += 1; exon_temp_list.append(current_start); exon_temp_list.append(current_stop)
else:
exon_info = ['E'+str(index)+'-1',exon,(index,1)]
exon_list.append(exon_info); last_start = exon[0]; last_stop = exon[1]; index += 1; index2 = 2; exon_temp_list=[]; exon_temp_list.append(current_start); exon_temp_list.append(current_stop)
exon_location2[key] = exon_list
for key in exon_location2:
###Re-assign exon coordinates back to the actual coordinates for negative strand genes
strand = key[-1]
if strand == '-':
for exon_data in exon_location2[key]:
ed = exon_data[1][2]; ens_exon = ed.ExonID()
###re-assign the coordinates
start,stop = original_neg_strand_coord[ens_exon]
exon_data[1][0] = start; exon_data[1][1] = stop
"""
for key in exon_location2:
if key[0] == 'ENSG00000129566':
for item in exon_location2[key]: print key, item"""
return exon_location2
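### Numbering sketch (hypothetical coordinates): exons at 100-200, 150-250 and 400-500
### are labelled E1-1, E1-2 and E2-1 -- the first two overlap and share block 1 with
### incrementing region numbers, while the third opens a new block. Negative-strand
### coordinates are normalized before this assignment and restored afterwards.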
def exon_clustering(exon_location):
"""for i in exon_location[('ENSG00000129566','14','-')]:print i"""
#exon_info = exon_start,exon_end,y
#try: exon_annotation_db[(geneid,chr,strand)].append(exon_info)
#except KeyError: exon_annotation_db[(geneid,chr,strand)] = [exon_info]
exon_clusters={}; region_locations={}; region_gene_db={}
for key in exon_location:
chr = 'chr'+key[1];entries = exon_location[key];strand = key[-1]; gene = key[0]
temp_exon_db={}; temp_exon_db2={}; exon_block_db={}
for exon in entries:
a = string.find(exon[0],'-') ###legacy check: not all exon IDs are guaranteed to contain a '-'
if a == -1: exon_number = int(exon[0][1:])
else: exon_number = int(exon[0][1:a])
###Group overlapping exons into exon clusters
try: temp_exon_db[exon_number].append(exon[1])
except KeyError: temp_exon_db[exon_number] = [exon[1]]
###add all start and stop values to the temp database
try: temp_exon_db2[exon_number].append(exon[1][0])
except KeyError: temp_exon_db2[exon_number] = [exon[1][0]]
temp_exon_db2[exon_number].append(exon[1][1])
try: exon_block_db[exon_number].append(exon)
except KeyError: exon_block_db[exon_number] = [exon]
for exon in temp_exon_db:
#intron = 'I'+str(exon)+'-1'
exon_info = unique.unique(temp_exon_db2[exon])
exon_info.sort();start = exon_info[0];stop = exon_info[-1];type=[]; accession=[]
for (exon_start,exon_stop,ed) in temp_exon_db[exon]:
exon_type = ed.Constitutive()
exon_id = ed.ExonID()
type.append(exon_type); accession.append(exon_id)
#if exon_id == 'ENSE00000888906': print exon_info,temp_exon_db[exon]
type=unique.unique(type); accession=unique.unique(accession)
exon_data = exon,(start,stop),accession,type
key1 = key[0],chr,strand
try: exon_clusters[key1].append(exon_data)
except KeyError: exon_clusters[key1] = [exon_data]
#if len(exon_info)-len(temp_exon_db[exon])>2: print key1,len(exon_info),len(temp_exon_db[exon]);kill
if len(exon_info)>2:
if strand == '-': exon_info.reverse()
index=0; exon_data_list=[]
while index < (len(exon_info)-1):
region = str(index+1)#;region_locations[key,'E'+str(exon)+'-'+region] = exon_info[index:index+2]
if strand == '-': new_stop,new_start = exon_info[index:index+2]
else: new_start,new_stop = exon_info[index:index+2]
ned = ExonStructureData(gene, key[1], strand, new_start, new_stop, '', '', [])
new_exon_info = ['E'+str(exon)+'-'+region,[new_start,new_stop,ned],(exon,index+1)]
exon_data_list.append(new_exon_info)
index+=1
#if gene == 'ENSG00000171735':print new_exon_info, 0
region_locations[key,exon] = exon_data_list
else:
exon_data_list = [exon_block_db[exon][0]] ###Multiples can occur - two exons sharing one set of coordinates
#if gene == 'ENSG00000171735':print exon_data_list, 1
region_locations[key,exon] = exon_data_list
###Re-sort and re-populate the new exon_location database where we've re-defined the region entries
interim_location_db={}
for (key,exon) in region_locations:
exon_data_list = region_locations[(key,exon)]
for exon_data in exon_data_list:
try: interim_location_db[key].append((exon,exon_data))
except KeyError: interim_location_db[key] = [(exon,exon_data)]
for key in interim_location_db:
interim_location_db[key].sort(); new_exon_list=[]
for (e,i) in interim_location_db[key]: new_exon_list.append(i)
exon_location[key] = new_exon_list
#for i in exon_location[('ENSG00000171735', '1', '+')]:print i
"""
for i in region_locations:
if 'ENSG00000129566' in i[0]:print i, region_locations[i]
###Transform coordinates from the source Ensembl exon to discrete regions (regions may not be biologically relevant in the 3' or 5' exon of the gene due to EST assemblies).
for (key,exon_id) in region_locations:
gene,chr,strand = key; id_added='no'; exon_annot=[]; ens_exonids=[]; ens_transcripts=[]
if strand == '-': stop,start = region_locations[(key,exon_id)]
else: start,stop = region_locations[(key,exon_id)]
###If the number of old regions is greater than the new, delete the old
previous_region_number = len(exon_location[key])
new_region_number = region_gene_db[key]
if previous_region_number>new_region_number:
for exon_data in exon_location[key]:
exon_start,exon_stop,ed = exon_data[1]; ens_exon_id = ed.ExonID(); exon_annotation = ed.Constitutive()
#if exon_id == exon_data[0]: exon_data[1][0] = start; exon_data[1][1] = stop; id_added = 'yes'
if exon_start == start or exon_stop == stop: exon_annot.append(exon_annotation); ens_exonids.append(ens_exon_id); ens_transcripts+=ed.TranscriptID()
if exon_stop == start or exon_start == stop: exon_annot.append(exon_annotation); ens_exonids.append(ens_exon_id)
exon_annot = unique.unique(exon_annot); ens_exonids = unique.unique(ens_exonids); ens_transcripts = unique.unique(ens_transcripts)
exon_annot = string.join(exon_annot,'|'); ens_exonids = string.join(ens_exonids,'|')
for exon_data in exon_location[key]:
if exon_id == exon_data[0]:
###Replace existing entries (we're basically replacing these with completely new data, just like below, but this is easier than deleting the existing)
y = ExonStructureData(gene, chr, strand, start, stop, exon_annot, ens_exonids, ens_transcripts)
exon_data[1][0] = start; exon_data[1][1] = stop; exon_data[1][2] = y; id_added = 'yes'
#start is the lower number, with either strand
break
if id_added == 'no': ###This occurs when a new region must be introduced from a large, now-broken exon (e.g. E1-1 pos: 1-300, E1-2 pos: 20-200, now need a 3rd E1-3 200-300)
indeces = string.split(exon_id[1:],'-')
index1 = int(indeces[0]); index2 = int(indeces[1])
#new_entry = ['E'+str(index-1) +'-'+ str(index2),exon,(index-1,index2)]
#exon_info = [exon_start,exon_stop,exon_id,exon_annot]
###Can include inappropriate exon IDs and annotations, but not worth specializing the code
y = ExonStructureData(gene, chr, strand, start, stop, exon_annot, ens_exonids, ens_transcripts)
exon_info = [start,stop,y]
new_entry = [exon_id,exon_info,(index1,index2)]
#if key == ('ENSG00000129566', '14', '-'): print key,new_entry;kill
exon_location[key].append(new_entry)"""
exon_regions = {}
for (gene,chr,strand) in exon_location:
for exon_data in exon_location[(gene,chr,strand)]:
try: exon_region_id,exon_detailed,(exon_num,region_num) = exon_data
except ValueError: print exon_data;kill
start,stop,ed = exon_detailed
if strand == '+': start,stop,ed = exon_detailed
else: stop,start,ed = exon_detailed
y = ExonRegionData(gene, chr, strand, start, stop, ed.ExonID(), exon_region_id, exon_num, region_num, ed.Constitutive())
try: exon_regions[gene].append(y)
except KeyError: exon_regions[gene] = [y]
"""
for key in exon_location:
if key[0] == 'ENSG00000075413':
print key
for item in exon_location[key]: print item"""
###Create a corresponding database of intron and locations for the clustered block exon database
intron_clusters={}; intron_region_db={}
for key in exon_clusters:
gene,chr,strand = key; chr = string.replace(chr,'chr','')
exon_clusters[key].sort(); index = 0
for exon_data in exon_clusters[key]:
try:
exon_num,(start,stop),null,null = exon_data
next_exon_data = exon_clusters[key][index+1]
en,(st,sp),null,null = next_exon_data
intron_num = exon_num
if strand == '+': intron_start = stop+1; intron_stop = st-1; intron_start_stop = (intron_start,intron_stop)
else: intron_start = start-1; intron_stop = sp+1; intron_start_stop = (intron_stop,intron_start)
index+=1
intron_data = intron_num,intron_start_stop,'','no'
try: intron_clusters[key].append(intron_data)
except KeyError: intron_clusters[key] = [intron_data]
###This database is used for SubGeneViewer and is analogous to region_db
intron_region_id = 'I'+str(intron_num)+'-1'
rd = ExonRegionData(gene, chr, strand, intron_start, intron_stop, ed.ExonID(), intron_region_id, intron_num, 1, 0)
if gene in intron_region_db:
block_db = intron_region_db[gene]
block_db[intron_num] = [rd]
else:
block_db={}; block_db[intron_num] = [rd]
intron_region_db[gene] = block_db
except IndexError: continue ### If no gene is added to intron_region_db, can be due to complete merger of all exons (multi-exon gene) when exon-exclusion occurs
return exon_clusters,intron_clusters,exon_regions,intron_region_db
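### Intron derivation sketch (hypothetical coordinates): for '+' strand exon clusters
### ending at 200 with the next beginning at 300, the intervening intron is stored as
### (201, 299); on the '-' strand the flanking positions come from the opposite exon
### ends and are stored in (stop, start) order, mirroring the exon convention above.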
def eliminate_redundant_dict_values(database):
for key in database:
list = makeUnique(database[key])
list.sort()
database[key] = list
return database
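### Example (assuming makeUnique de-duplicates as its name implies):
### {'g1': [('E1','x'), ('E1','x'), ('E2','y')]} becomes {'g1': [('E1','x'), ('E2','y')]},
### de-duplicated and sorted in place for every key.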
def getEnsemblAnnotations(filename,rna_processing_ensembl):
fn=filepath(filename)
ensembl_annotation_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if 'Description' in data: ensembl_gene_id,description,symbol = string.split(data,'\t')
else: ensembl_gene_id,symbol,description = string.split(data,'\t')
ensembl_annotation_db[ensembl_gene_id] = description, symbol
for gene in ensembl_annotation_db:
if gene in rna_processing_ensembl: mRNA_processing = 'RNA_processing/binding'
else: mRNA_processing = ''
index = ensembl_annotation_db[gene]
ensembl_annotation_db[gene] = index[0],index[1],mRNA_processing
exportEnsemblAnnotations(ensembl_annotation_db)
return ensembl_annotation_db
def exportEnsemblAnnotations(ensembl_annotation_db):
exon_annotation_export = 'AltDatabase/ensembl/' +species+'/'+species+ '_Ensembl-annotations.txt'
fn=filepath(exon_annotation_export); data = open(fn,'w')
for ensembl_gene in ensembl_annotation_db:
a = ensembl_annotation_db[ensembl_gene]
values = ensembl_gene +'\t'+ a[0] +'\t'+ a[1] +'\t'+ a[2] + '\n'
data.write(values)
data.close()
def reimportEnsemblAnnotations(species,symbolKey=False):
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations.txt'
fn=filepath(filename)
ensembl_annotation_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
ensembl_gene_id,symbol,description,mRNA_processing = string.split(data,'\t')
if symbolKey:
ensembl_annotation_db[symbol] = ensembl_gene_id
else:
ensembl_annotation_db[ensembl_gene_id] = symbol,description,mRNA_processing
return ensembl_annotation_db
def importPreviousBuildTranslation(filename,use_class_structures):
###When previous genome build coordinates are provided for an array, create a translation
###file between Ensembl exon ids
fn=filepath(filename)
exon_coord_translation_db = {}; gene_coor_translation_db = {}; exon_db = {}
gene_coor_translation_list = []; x=0
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
if x == 0: x+=1 ###ignore the first line
else:
chr, gene_start, gene_stop, strand, ensembl_gene_id, ensembl_exon_id, exon_start, exon_stop, null, null, null,null, constitutive_exon = string.split(data,'\t')
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
gene_start = int(gene_start); gene_stop = int(gene_stop)
exon_start = int(exon_start); exon_stop = int(exon_stop)
if strand == '-1': strand = '-'
if strand == '1': strand = '+'
if strand == '-': new_gene_start = gene_stop; new_gene_stop = gene_start; new_exon_start = exon_stop; new_exon_stop = exon_start
else: new_gene_start = gene_start; new_gene_stop = gene_stop; new_exon_start = exon_start; new_exon_stop = exon_stop
new_exon_start = abs(new_gene_start - new_exon_start)
new_exon_stop = abs(new_gene_start - new_exon_stop)
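### Relative-coordinate sketch (hypothetical values): for a '+' strand gene at
### 1000-5000 with an exon at 1200-1300, new_exon_start = abs(1000-1200) = 200 and
### new_exon_stop = abs(1000-1300) = 300, i.e. offsets from the gene's 5' end; on the
### '-' strand new_gene_start is gene_stop, so offsets are measured from there instead.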
###constitutive_exon column contains a 0 or 1: ensembl_exon_id is versioned
ensembl_exon_id,null = string.split(ensembl_exon_id,'.')
if use_class_structures == 'yes':
exon_coord_info = ensembl_gene_id,(exon_start,exon_stop),(gene_start,gene_stop),int(constitutive_exon)
ei = EnsemblInformation(chr, gene_start, gene_stop, strand, ensembl_gene_id, ensembl_exon_id, exon_start, exon_stop, constitutive_exon, new_exon_start, new_exon_stop, new_gene_start, new_gene_stop)
###Also independently determine exon clusters for previous build exon data
exon_annot=''; exon_info = (exon_start,exon_stop,ensembl_exon_id,exon_annot)
try: exon_db[(ensembl_gene_id,chr,strand)].append(exon_info)
except KeyError: exon_db[(ensembl_gene_id,chr,strand)] = [exon_info]
y = ei
else:
ei = [chr,(gene_start,gene_stop),ensembl_gene_id,(new_gene_start,new_gene_stop)]
y = [chr,(exon_start,exon_stop),ensembl_exon_id,(new_exon_start,new_exon_stop)]
try: exon_coord_translation_db[ensembl_gene_id].append(y)
except KeyError: exon_coord_translation_db[ensembl_gene_id] = [y]
gene_coor_translation_db[(chr,strand),gene_start,gene_stop] = ei
for key in gene_coor_translation_db:
ei = gene_coor_translation_db[key]
gene_coor_translation_list.append([key,ei])
gene_coor_translation_list.sort()
gene_coor_translation_list2={}
for key in gene_coor_translation_list:
chr_strand = key[0][0]
try: gene_coor_translation_list2[chr_strand].append(key[-1])
except KeyError: gene_coor_translation_list2[chr_strand] = [key[-1]]
gene_coor_translation_list = gene_coor_translation_list2
###Determine Exon Clusters for current Ensembl genes with poor linkage properties (can't be converted to old coordinates)
exon_db2 = annotate_exons(exon_db)
exon_clusters = exon_clustering(exon_db2); exon_db2={}
return exon_coord_translation_db, exon_db, exon_clusters
def importExonTranscriptAnnotations(filename):
fn=filepath(filename)
exon_trans_association_db = {}; x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x+=1
else:
ensembl_gene_id,ensembl_trans_id,ensembl_exon_id,constitutive = string.split(data,'\t')
ensembl_exon_id,null = string.split(ensembl_exon_id,'.')
try: exon_trans_association_db[ensembl_exon_id].append([ensembl_trans_id,constitutive])
except KeyError: exon_trans_association_db[ensembl_exon_id] = [[ensembl_trans_id,constitutive]]
return exon_trans_association_db
def importEnsemblDomainData(filename):
fn=filepath(filename); x = 0; ensembl_ft_db = {}; ensembl_ft_summary_db = {} # Use the last database for summary statistics
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 1:
try: ensembl_gene, chr, mgi, uniprot, ensembl_prot, seq_data, position_info = string.split(data,'\t')
except ValueError: continue
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
ft_info_list = string.split(position_info,' | ')
for entry in ft_info_list:
try: peptide_start_end, gene_start_end, feature_source, interprot_id, description = string.split(entry,' ')
except ValueError: continue
###142-180 3015022-3015156 Pfam IPR002050 Env_polyprotein
ft_start_pos, ft_end_pos = string.split(peptide_start_end,'-')
pos1 = int(ft_start_pos); pos2 = int(ft_end_pos)
sequence_fragment = seq_data[pos1:pos2]
if len(description)>1 or len(interprot_id)>1:
ft_info = [description,sequence_fragment,interprot_id]
ft_info2 = description,interprot_id
###uniprot_ft_db[id].append([ft,pos1,pos2,annotation])
try: ensembl_ft_db[ensembl_gene].append(ft_info)
except KeyError: ensembl_ft_db[ensembl_gene] = [ft_info]
try: ensembl_ft_summary_db[ensembl_gene].append(ft_info2)
except KeyError: ensembl_ft_summary_db[ensembl_gene] = [ft_info2]
elif data[0:6] == 'GeneID': x = 1
ensembl_ft_db = eliminate_redundant_dict_values(ensembl_ft_db)
ensembl_ft_summary_db = eliminate_redundant_dict_values(ensembl_ft_summary_db)
domain_gene_counts = {}
###Count the number of domains present in all genes (count a domain only once per gene)
for gene in ensembl_ft_summary_db:
for domain_info in ensembl_ft_summary_db[gene]:
try: domain_gene_counts[domain_info] += 1
except KeyError: domain_gene_counts[domain_info] = 1
print "Number of Ensembl genes, linked to array genes with domain annotations:",len(ensembl_ft_db)
print "Number of Ensembl domains:",len(domain_gene_counts)
return ensembl_ft_db,domain_gene_counts
def getEnsemblAssociations(Species,data_type,test_status):
global species; species = Species
global test; test = test_status
global test_gene
meta_test = ["ENSG00000224972","ENSG00000107077"]
test_gene = ['ENSG00000163132'] #'ENSG00000215305
#test_gene = ['ENSMUSG00000000037'] #'test Mouse - ENSMUSG00000000037
test_gene = ['ENSMUSG00000065005'] #'ENSG00000215305
test_gene = ['ENSRNOE00000194194']
test_gene = ['ENSG00000229611','ENSG00000107077','ENSG00000107077','ENSG00000107077','ENSG00000107077','ENSG00000107077','ENSG00000163132', 'ENSG00000115295']
test_gene = ['ENSG00000110955']
#test_gene = ['ENSMUSG00000059857'] ### for JunctionArrayEnsemblRules
#test_gene = meta_test
exon_annotation_db,transcript_gene_db,gene_transcript,intron_retention_db,ucsc_splicing_annot_db = getEnsExonStructureData(species,data_type)
exon_annotation_db2 = annotate_exons(exon_annotation_db); ensembl_descriptions={}
exon_db = customDBDeepCopy(exon_annotation_db2) ##having problems with re-writing contents of this db when I don't want to
exon_clusters,intron_clusters,exon_regions,intron_region_db = exon_clustering(exon_db); exon_db={}
exon_junction_db,putative_as_junction_db,exon_junction_db,full_junction_db,excluded_intronic_junctions = processEnsExonStructureData(exon_annotation_db,exon_regions,transcript_gene_db,gene_transcript,intron_retention_db)
exon_regions,critical_gene_junction_db = compareJunctions(species,putative_as_junction_db,exon_regions)
exportSubGeneViewerData(exon_regions,exon_annotation_db2,critical_gene_junction_db,intron_region_db,intron_retention_db,full_junction_db,excluded_intronic_junctions,ucsc_splicing_annot_db)
full_junction_db=[]
###Grab rna_processing Ensembl associations
use_exon_data='no';get_splicing_factors = 'yes'
try: rna_processing_ensembl = GO_parsing.parseAffyGO(use_exon_data,get_splicing_factors,species)
except Exception: rna_processing_ensembl={}
ensembl_annot_file = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
ensembl_annotation_db = getEnsemblAnnotations(ensembl_annot_file,rna_processing_ensembl)
#exportExonClusters(exon_clusters,species)
return exon_annotation_db2,ensembl_annotation_db,exon_clusters,intron_clusters,exon_regions,intron_retention_db,ucsc_splicing_annot_db,transcript_gene_db
def getExonTranscriptDomainAssociations(Species):
global species; species = Species
import_dir = '/AltDatabase/ensembl/'+species
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for file_name in dir_list: #loop through each file in the directory to output results
dir_file = 'AltDatabase/ensembl/'+species+'/'+file_name
if 'Exon_cDNA' in dir_file: exon_trans_file = dir_file
elif 'Domain' in dir_file: domain_file = dir_file
exon_trans_association_db = importExonTranscriptAnnotations(exon_trans_file)
return exon_trans_association_db
def exportExonClusters(exon_clusters,species):
exon_cluster_export = 'AltDatabase/ensembl/'+species+'/'+species+'-Ensembl-exon-clusters.txt'
fn=filepath(exon_cluster_export); data = open(fn,'w')
for key in exon_clusters:
ensembl_gene = key[0]
chr = key[1]
strand = key[2]
for entry in exon_clusters[key]:
exon_cluster_num = str(entry[0])
exon_start = str(entry[1][0])
exon_stop = str(entry[1][1])
exon_id_list = string.join(entry[2],'|')
annotation_list = string.join(entry[3],'|')
values = ensembl_gene +'\t'+ chr +'\t'+ strand +'\t'+ exon_cluster_num +'\t'+ exon_start +'\t'+ exon_stop +'\t'+ exon_id_list +'\t'+ annotation_list +'\n'
data.write(values)
data.close()
def checkforEnsemblExons(trans_exon_data):
proceed_status = 0
for (start_pos,ste,spe) in trans_exon_data:
#print ste.ExonRegionNumbers(),spe.ExonRegionNumbers(), [ste.ExonID(),spe.ExonID()];kill
if ste.ExonID() == '' or spe.ExonID() == '': print ste.ExonRegionNumbers(),spe.ExonRegionNumbers(), [ste.ExonID(),spe.ExonID()];kill
if ('-' in ste.ExonID()) and ('-' in spe.ExonID()): null=[]
else: proceed_status +=1
#else: print ste.ExonRegionNumbers(),spe.ExonRegionNumbers(), [ste.ExonID(),spe.ExonID()];kill
if proceed_status>0: proceed_status = 'yes'
else: proceed_status = 'no'
return proceed_status
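### Behavior note: exon region IDs containing '-' lack a direct Ensembl exon accession,
### so a transcript is kept ('yes') only when at least one of its junction exon pairs
### includes an ID without '-', i.e. direct Ensembl exon evidence on at least one side.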
def getEnsemblGeneLocations(species,array_type,key):
if key == 'key_by_chromosome':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
else:
if array_type == 'RNASeq':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
else:
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
fn=filepath(filename); x=0; gene_strand_db={}; gene_location_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
try: gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
except Exception:
if array_type == 'RNASeq': gene = t[0]; chr=t[2]; strand=t[3]; exon_start=t[4]; exon_end=t[5] ### for Ensembl_exon.txt
else: gene = t[2]; chr=t[4]; strand=t[5]; exon_start=t[6]; exon_end=t[7] ### for Ensembl_probesets.txt
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if strand == '-1' or strand == '-': strand = '-'
else: strand = '+'
exon_end = int(exon_end); exon_start = int(exon_start)
gene_strand_db[gene] = strand, chr
try: gene_location_db[gene]+=[exon_start,exon_end]
except Exception: gene_location_db[gene]=[exon_start,exon_end]
if key == 'key_by_chromosome':
location_gene_db={}; chr_gene_db={}
for gene in gene_location_db:
gene_location_db[gene].sort()
start = gene_location_db[gene][0]
end = gene_location_db[gene][-1]
strand,chr = gene_strand_db[gene]
location_gene_db[chr,start,end]=gene,strand
try: chr_gene_db[chr].append([start,end])
except Exception: chr_gene_db[chr]=[[start,end]]
return chr_gene_db,location_gene_db
else:
for gene in gene_location_db:
gene_location_db[gene].sort()
start = gene_location_db[gene][0]
end = gene_location_db[gene][-1]
strand,chr = gene_strand_db[gene]
gene_location_db[gene]=chr,strand,str(start),str(end)
return gene_location_db
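### Usage sketch (hypothetical call): getEnsemblGeneLocations('Hs','RNASeq','') returns
### {gene: (chr, strand, start, end)} with start/end as the stringified min and max of
### all exon coordinates seen for the gene; key == 'key_by_chromosome' instead returns
### (chr_gene_db, location_gene_db) for chromosome-wise interval lookups.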
def getAllEnsemblUCSCTranscriptExons():
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
importEnsExonStructureData(filename,species,'exon-transcript')
filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_mrna.txt'
importEnsExonStructureData(filename,species,'exon-transcript')
exon_transcripts = eliminate_redundant_dict_values(exon_transcript_db)
return exon_transcripts
def processEnsExonStructureData(exon_annotation_db,exon_regions,transcript_gene_db,gene_transcript,intron_retention_db):
###Parses transcript to exon relationships and links these to previously described distinct exon regions.
###The exon blocks and region numbers provide a simple semantic for determining where in the transcript and what event is occurring
###when directly comparing junctions to each other
exon_transcripts = getAllEnsemblUCSCTranscriptExons()
transcript_exon_db={}; constitutive_region_gene_db={}; null=[]; k=[]
for (gene,chr,strand) in exon_annotation_db:
constitutive_region_count_db={}; constitutive_region_count_list=[]; count_db={}
for exon_info in exon_annotation_db[(gene,chr,strand)]:
if strand == '+': exon_start,exon_end,y = exon_info ###Link the original exon-start/stop data back to the region based data
else: exon_end,exon_start,y = exon_info ###This shouldn't be necessary, but the coordinates get reversed in annotate_exons and copying the original via deepcopy takes way too much time
transcripts = exon_transcripts[y.ExonID()] ### ERROR WILL OCCUR HERE IF YOU DON'T REBUILD THE UCSC DATABASE!!! (different version still left over from Protein analysis methods)
if gene in exon_regions:
ste=''; spe=''
#print exon_start, y.ExonID(); y.TranscriptID(); y.IntronDeletionStatus()
for ed in exon_regions[gene]:
#print ed.ExonRegionID(),transcripts, [ed.ExonStart(), ed.ExonStop(), exon_start,exon_end]
### Since we know which exon region each Ensembl/UCSC exon begins and ends, we can just count the number of transcripts
### that contain each of the Ensembl/UCSC exon ID, which will give us our constitutive exon count for each gene
proceed = 'no'
if ((ed.ExonStart() >= exon_start) and (ed.ExonStart() <= exon_end)) and ((ed.ExonStop() >= exon_start) and (ed.ExonStop() <= exon_end)): proceed = 'yes'
elif ((ed.ExonStart() <= exon_start) and (ed.ExonStart() >= exon_end)) and ((ed.ExonStop() <= exon_start) and (ed.ExonStop() >= exon_end)): proceed = 'yes'
if proceed == 'yes': ### Ensures that each examined exon region lies directly within the examined exon (beginning and end) -> version 2.0.6
try: constitutive_region_count_db[ed.ExonRegionID()]+=list(transcripts) #rd.ExonNumber() rd.ExonRegionID()
except KeyError: constitutive_region_count_db[ed.ExonRegionID()]=list(transcripts) ### If you don't convert to list, the existing list object will be updated non-specifically
### Identify the exon regions that associate with the beginning and end-positions of each exon to later determine the junction in exon region space
if exon_start == ed.ExonStart(): ste = ed ### Two exon regions can not have the same start position in this database
if exon_end == ed.ExonStop(): spe = ed ### Two exon regions can not have the same end position in this database
if spe!='' and ste!='': break
#print ed.ExonStart(),ed.ExonStop()
if spe!='' and ste!='':
values = exon_start,ste,spe #,y
ste.reSetExonID(y.ExonID()); spe.reSetExonID(y.ExonID()) ###Need to represent the original source ExonID to eliminate UCSC transcripts without Ensembl exons
for ens_transcriptid in y.TranscriptID():
#print exon_start, ste.ExonID(), spe.ExonID(),y.TranscriptID()
try: transcript_exon_db[ens_transcriptid].append(values)
except KeyError: transcript_exon_db[ens_transcriptid] = [values]
else:
### Indicates this exon has been classified as a retained intron
### Keep it so we know where this exon is and to prevent inclusion of faulty flanking junctions
### This is needed so we can retain junctions that are unique to this transcript but not next to the retained intron
values = y.ExonStart(),y,y
for ens_transcriptid in y.TranscriptID():
try: transcript_exon_db[ens_transcriptid].append(values)
except KeyError: transcript_exon_db[ens_transcriptid] = [values]
else: k.append(gene)
### Get the number of transcripts associated with each region
for exon_region in constitutive_region_count_db:
#print exon_region, unique.unique(constitutive_region_count_db[exon_region])
transcript_count = len(unique.unique(constitutive_region_count_db[exon_region]))
constitutive_region_count_list.append(transcript_count)
try: count_db[transcript_count].append(exon_region)
except KeyError: count_db[transcript_count] = [exon_region]
constitutive_region_count_list = unique.unique(constitutive_region_count_list)
constitutive_region_count_list.sort()
try: max_count = constitutive_region_count_list[-1]
except Exception:
print gene, constitutive_region_count_list, constitutive_region_count_db, count_db;kill
#print count_db
cs_exon_region_ids = list(count_db[max_count])
### If there is only one constitutive region and one set of strong runner ups, include these to improve the constitutive expression prediction
if (len(cs_exon_region_ids)==1 and len(constitutive_region_count_list)>2) or len(constitutive_region_count_list)>3: ### Decided to add another heuristic that will add the second highest scoring exons always (if at least 4 frequencies present)
second_highest_count = constitutive_region_count_list[-2]
if (max_count-second_highest_count)<3: ### Indicates that the highest and second highest common exons are similar in terms of their frequency (differ by two transcripts at most)
#print cs_exon_region_ids
#print second_highest_count
if second_highest_count != 1: ### Don't include if there is only one transcript contributing
cs_exon_region_ids += list(count_db[second_highest_count])
constitutive_region_gene_db[gene] = cs_exon_region_ids
#print gene, cs_exon_region_ids;sys.exit()
exon_transcripts=[]; del exon_transcripts
### Reset the constitutive exon, previously assigned by Ensembl - ours should be more informative since it uses more transcript data and specific region info
#"""
for gene in constitutive_region_gene_db:
if gene in exon_regions:
for ed in exon_regions[gene]:
if ed.ExonRegionID() in constitutive_region_gene_db[gene]: ed.setConstitutive('1')
else: ed.setConstitutive('0')
#print ed.ExonRegionID(), ed.Constitutive()
#"""
null = unique.unique(null); #print len(null)
print len(k), 'genes not matched to the region database'
print len(transcript_gene_db), 'transcripts from Ensembl being analyzed'
transcript_exon_db = eliminate_redundant_dict_values(transcript_exon_db)
gene_transcript = eliminate_redundant_dict_values(gene_transcript)
gene_transcript_multiple={}; tc=0
for gene in gene_transcript:
if len(gene_transcript[gene])>1: tc+=1; gene_transcript_multiple[gene]=len(gene_transcript[gene])
print tc,"genes with multiple transcripts associated from Ensembl"
"""
###If a retained intron is present we must ignore all exons downstream of where we DELETED that exon information (otherwise false junction information is introduced)
###Here we simply delete the information for that transcript altogether
td=0
for key in intron_retention_db:
transcripts_to_delete = {}
for (s1,s2,y) in intron_retention_db[key]:
if y.IntronDeletionStatus() == 'yes': ###only do this for exon's deleted upon import
for transcript in y.TranscriptID(): transcripts_to_delete[transcript] = []
for transcript in transcripts_to_delete:
###may not be present if the exon deleted constituted the whole transcript
if transcript in transcript_exon_db:
for (start_pos,ste,spe) in transcript_exon_db[transcript]:
print ste.ExonRegionID(),spe.ExonRegionID()
del transcript_exon_db[transcript]; td+=1
print td, "transcripts deleted with intron retention. Required for down-stream junction analysis"
"""
exon_junction_db={}; full_junction_db={}; excluded_intronic_junctions={}; rt=0
mx_detection_db={}; transcript_exon_region_db={}; #junction_transcript_db={}
###Sort and filter the junction data
for transcript in transcript_exon_db:
gene,chr,strand = transcript_gene_db[transcript]
transcript_exon_db[transcript].sort()
if strand == '-': transcript_exon_db[transcript].reverse()
index=0
#if 'BC' in transcript: print transcript, transcript_exon_db[transcript]
###Introduced a filter to remove transcripts from UCSC with no supporting Ensembl exons (distinct unknown transcript type)
###Since these are already exon regions, we exclude them from alt. splicing/promoter assignment.
proceed_status = checkforEnsemblExons(transcript_exon_db[transcript])
###Loop through the exons in each transcript
if proceed_status == 'yes':
#print transcript
#print transcript_exon_db[transcript]
for (start_pos,ste,spe) in transcript_exon_db[transcript]:
if (index+1) != len(transcript_exon_db[transcript]): ###Don't grab past the last exon in the transcript
start_pos2,ste2,spe2 = transcript_exon_db[transcript][index+1]
#print spe.ExonID(),ste.ExonID(),spe2.ExonID(),ste2.ExonID(), spe.ExonRegionID2(),ste.ExonRegionID2(),spe2.ExonRegionID2(),ste2.ExonRegionID2(),ste.IntronDeletionStatus(),ste2.IntronDeletionStatus(),spe.ExonStop(),ste2.ExonStart()
#print transcript,spe.ExonStop(),ste2.ExonStart(), ste.IntronDeletionStatus(),ste2.IntronDeletionStatus()
if ste.IntronDeletionStatus() == 'no' and ste2.IntronDeletionStatus() == 'no':
### Don't include junctions where the current or next junction was a removed retained intron (but keep other junctions in the transcript)
exon_junction = (ste.ExonRegionNumbers(),spe.ExonRegionNumbers()),(ste2.ExonRegionNumbers(),spe2.ExonRegionNumbers())
try: exon_junction_db[gene].append(exon_junction)
except KeyError: exon_junction_db[gene] = [exon_junction]
#try: junction_transcript_db[gene,exon_junction].append(transcript)
#except KeyError: junction_transcript_db[gene,exon_junction] = [transcript]
#try: transcript_exon_region_db[transcript]+=[ste.ExonRegionNumbers(),spe.ExonRegionNumbers(),ste2.ExonRegionNumbers(),spe2.ExonRegionNumbers()]
#except Exception: transcript_exon_region_db[transcript] = [ste.ExonRegionNumbers(),spe.ExonRegionNumbers(),ste2.ExonRegionNumbers(),spe2.ExonRegionNumbers()]
try: full_junction_db[gene].append((spe.ExonRegionID2(),ste2.ExonRegionID2()))
except KeyError: full_junction_db[gene] = [(spe.ExonRegionID2(),ste2.ExonRegionID2())]
### Look for potential mutually-exclusive splicing events
if (index+2) != len(transcript_exon_db[transcript]):
start_pos3,ste3,spe3 = transcript_exon_db[transcript][index+2]
if ste3.IntronDeletionStatus() == 'no':
try: mx_detection_db[gene,spe.ExonRegionID2(),ste3.ExonRegionID2()].append(spe2)
except KeyError: mx_detection_db[gene,spe.ExonRegionID2(),ste3.ExonRegionID2()]=[spe2]
else:
### Retain this information when exporting all known junctions
#print transcript,spe.ExonStop(),ste2.ExonStart()
try: excluded_intronic_junctions[gene].append((spe,ste2))
except KeyError: excluded_intronic_junctions[gene]=[(spe,ste2)]
index+=1
else: rt +=1
mx_exons=[]
for key in mx_detection_db:
gene = key[0]
if len(mx_detection_db[key])>1:
cassette_block_ids=[]; cassette_block_db={}
for spe2 in mx_detection_db[key]:
cassette_block_ids.append(spe2.ExonNumber())
cassette_block_db[spe2.ExonNumber()]=spe2
cassette_block_ids = unique.unique(cassette_block_ids)
if len(cassette_block_ids)>1:
for exon in cassette_block_ids:
spe2 = cassette_block_db[exon]
spe2.setMutuallyExclusive()
#print key, spe2.ExonRegionID(), spe2.ExonID()
#try: mx_exons[gene].append(spe2)
#except KeyError: mx_exons[gene]=[sp2]
print rt, "transcripts removed from analysis with no Ensembl exon evidence. Results in more informative splicing annotations downstream"
#print len(junction_transcript_db), 'length of junction_transcript_db'
print len(exon_junction_db),'length of exon_junction_db'
full_junction_db = eliminate_redundant_dict_values(full_junction_db)
###Stringent count, since it requires all region information for each exon and common splice events occur for just one region to another
###example: (((8, 1), (8, 1)), ((9, 1), (9, 1))), (((8, 1), (8, 1)), ((9, 1), (9, 2)))
putative_as_junction_db={}
for gene in exon_junction_db:
junctions = exon_junction_db[gene]
junction_count={}
for junction in exon_junction_db[gene]:
try: junction_count[junction]+=1
except KeyError: junction_count[junction]=1
count_junction={}; count_list=[]
for junction in junction_count:
count = junction_count[junction]
try: count_junction[count].append(junction)
except KeyError: count_junction[count] = [junction]
count_list.append(count)
count_list = unique.unique(count_list); count_list.sort() ###number of unique counts
if len(count_list)>1 and gene in gene_transcript_multiple: ###Otherwise, there is no variation in junction number between transcripts
transcript_number = gene_transcript_multiple[gene]
max_count = count_list[-1] ###most common junction - not alternatively spliced
if max_count == transcript_number: ###Ensures we are grabbing everything but constitutive exons (max_count can include AS if no Ensembl constitutive).
for count in count_junction:
if count != max_count:
junctions = count_junction[count]
junctions.sort()
try: putative_as_junction_db[gene]+=junctions
except KeyError: putative_as_junction_db[gene]=junctions
else:
try: putative_as_junction_db[gene]+=junctions
except KeyError: putative_as_junction_db[gene]=junctions
elif gene in gene_transcript_multiple:
###If there are multiple transcripts, discriminating these is difficult; just include all junctions for that gene
try: putative_as_junction_db[gene]+=junctions
except KeyError: putative_as_junction_db[gene]=junctions
return exon_junction_db,putative_as_junction_db,exon_junction_db,full_junction_db,excluded_intronic_junctions
def reformatJunctions(exons,type):
exons2=[]
for (b,i) in exons:
exons2.append('E'+str(b)+'.'+str(i))
if type == 'junction': exons2 = string.join(exons2,'-')
else: exons2 = string.join(exons2,'|')
return exons2
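### Example: reformatJunctions([(8,1),(9,2)],'junction') returns 'E8.1-E9.2', while any
### other type joins with '|' (e.g. 'E8.1|E9.2') -- the block.region notation used in
### the exported annotation files.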
def compareJunctions(species,putative_as_junction_db,exon_regions,rootdir=None,searchChr=None):
### Add polyA site information and mutually-exclusive splicing site
if len(exon_regions)==0:
export_annotation = '_de-novo'
alt_junction_export = rootdir+'/AltDatabase/ensembl/'+species+'/denovo/'+species+'_alternative_junctions'+export_annotation+'.'+searchChr+'.txt'
import export
data = export.ExportFile(alt_junction_export)
else:
export_annotation = ''
alt_junction_export = 'AltDatabase/ensembl/'+species+'/'+species+'_alternative_junctions'+export_annotation+'.txt'
if export_annotation != '_de-novo':
print 'Writing the file:',alt_junction_export
print len(putative_as_junction_db),'genes being examined for AS/alt-promoters in Ensembl'
fn=filepath(alt_junction_export); data = open(fn,'w')
title = ['gene','critical-exon-id','junction1','junction2']
title = string.join(title,'\t')+'\n'; data.write(title)
###Find splice events based on structure-based evidence
critical_exon_db={}; j=0; global add_to_for_terminal_exons; add_to_for_terminal_exons={}
complex3prime_event_db={}; complex5prime_event_db={}; cassette_exon_record={}
for gene in putative_as_junction_db:
#if gene == 'ENSMUSG00000000028': print putative_as_junction_db[gene];kill
for j1 in putative_as_junction_db[gene]:
for j2 in putative_as_junction_db[gene]: ### O(n^2) pairwise comparison of junctions
if j1 != j2:
temp_junctions = [j1,j2]; temp_junctions.sort(); junction1,junction2 = temp_junctions
splice_junctions=[]; critical_exon=[]; splice_type=''
e1a,e2a = junction1; e1b,e2b = junction2 ###((8, 2), (8, 2)) break down the exon into exon_block,region tuples.
e1a3,e1a5 = e1a; e2a3,e2a5 = e2a; e1b3,e1b5 = e1b; e2b3,e2b5 = e2b ###(8, 2) break down the exons into single tuples (designating 5' and 3' ends of the exon):
e1a3_block,e1a3_reg = e1a3; e1a5_block,e1a5_reg = e1a5; e2a3_block,e2a3_reg = e2a3; e2a5_block,e2a5_reg = e2a5
e1b3_block,e1b3_reg = e1b3; e1b5_block,e1b5_reg = e1b5; e2b3_block,e2b3_reg = e2b3; e2b5_block,e2b5_reg = e2b5
splice_junctions = [(e1a5,e2a3),(e1b5,e2b3)] ###three junctions make up the cassette event, record the two evidenced by this comparison and agglomerate after all comps
splice_junction_str = reformatJunctions(splice_junctions[0],'junction')+'\t'+reformatJunctions(splice_junctions[1],'junction')
###IMPORTANT NOTE: temp_junctions are sorted, but that does not mean splice_junctions is sorted correctly... must account for this
splice_junctions2 = list(splice_junctions); splice_junctions2.sort()
if splice_junctions2 != splice_junctions: ###Then the sorting is wrong and the down-stream method won't work
###Must re-do the above assignments
junction2,junction1 = temp_junctions
e1a,e2a = junction1; e1b,e2b = junction2 ###((8, 2), (8, 2)) break down the exon into exon_block,region tuples.
e1a3,e1a5 = e1a; e2a3,e2a5 = e2a; e1b3,e1b5 = e1b; e2b3,e2b5 = e2b ###(8, 2) break down the exons into single tuples (designating 5' and 3' ends of the exon):
e1a3_block,e1a3_reg = e1a3; e1a5_block,e1a5_reg = e1a5; e2a3_block,e2a3_reg = e2a3; e2a5_block,e2a5_reg = e2a5
e1b3_block,e1b3_reg = e1b3; e1b5_block,e1b5_reg = e1b5; e2b3_block,e2b3_reg = e2b3; e2b5_block,e2b5_reg = e2b5
splice_junctions = [(e1a5,e2a3),(e1b5,e2b3)]
splice_junction_str = reformatJunctions(splice_junctions[0],'junction')+'\t'+reformatJunctions(splice_junctions[1],'junction')
if e1a5_block == e2a3_block or e1b5_block == e2b3_block: continue ###suggests splicing within a block... we won't deal with these
if e1a5 == e1b5: ###If 5'exons in the junction are the same
###make sure the difference isn't in the 5' side of the next splice junction (or exon end)
if e2a3 != e2b3: #(((5, 1), (5, 1)*), ((6, 1)*, (6, 1))) -- (((5, 1), (5, 1)*), ((7, 1)*, (7, 1)))
if e2a3_block == e2b3_block: #[(((1, 1), (1, 1)*), ((2, 1)*, (2, 1))) ----(((1, 1), (1, 1)*), ((2, 3)*, (2, 3)))]
splice_type = "alt-3'"; critical_exon = pickOptimalCriticalExons(e1b5,e1a5,e2a3,e2b3,critical_exon)
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
else:
critical_exon = [e2a3]; splice_type = 'cassette-exon' #[(((1, 1), (1, 1)*), ((2, 1)*, (2, 1))) ----(((1, 1), (1, 1)*), ((3, 1)*, (3, 1)))]
try: add_to_for_terminal_exons[gene,e2a3_block].append(e2b3)
except KeyError: add_to_for_terminal_exons[gene,e2a3_block] = [e2b3]
try: cassette_exon_record[gene,e2a3_block].append(e1a5_block)
except KeyError: cassette_exon_record[gene,e2a3_block] = [e1a5_block]
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
#print critical_exon,splice_type,splice_junction_str
if splice_type =='' and e2a3 == e2b3: ###If 3'exons in the junction are the same
if e1a5 != e1b5:
if e1a5_block == e1b5_block: ###simple alt 5' splice site encountered
splice_type = "alt-5'"; critical_exon = pickOptimalCriticalExons(e1b5,e1a5,e2a3,e2b3,critical_exon)#[(((1, 1), (1, 1)*), ((2, 1)*, (2, 1))) ----(((1, 1), (1, 3)*), ((2, 1)*, (2, 1)))]
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
else:
splice_type = 'cassette-exon'; critical_exon = [e1b5] #[(((1, 1), (1, 1)*), ((3, 1)*, (3, 1))) ----(((2, 1), (2, 1)*), ((3, 1)*, (3, 1)))]
try: add_to_for_terminal_exons[gene,e1b5_block].append(e1a5)
except KeyError: add_to_for_terminal_exons[gene,e1b5_block] = [e1a5]
try: cassette_exon_record[gene,e1b5_block].append(e2b3_block)
except KeyError: cassette_exon_record[gene,e1b5_block] = [e2b3_block]
#if gene == 'ENSG00000128606' and critical_exon == [(4, 1)] : print junction1,junction2;kill
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
#print critical_exon,splice_type,splice_junction_str
if splice_type =='' and e2a3_block == e2b3_block and e1a5_block != e2a3_block and e1b5_block != e2b3_block: ###Begin looking at complex examples: If 3'exon blocks in the junction are the same
if e1a5_block == e1b5_block: #alt5'-alt3' [(((1, 1), (1, 1)*), ((2, 1)*, (2, 1))) ----(((1, 3), (1, 3)*), ((2, 3)*, (2, 3)))]
critical_exon = pickOptimalCriticalExons(e1b5,e1a5,e2a3,e2b3,critical_exon); splice_type = "alt5'-alt3'"
if len(critical_exon)>0:
alt5_exon = [critical_exon[0]]; alt3_exon = [critical_exon[1]]
#print alt5_exon,critical_exon,critical_exon;kill
y = CriticalExonInfo(gene,alt5_exon,"alt-5'",splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
y = CriticalExonInfo(gene,alt3_exon,"alt-3'",splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
else: #cassette-alt3' [(((1, 1), (1, 1)*), ((4, 1)*, (4, 1))) ----(((2, 1), (2, 3)*), ((4, 3)*, (4, 3)))]
critical_exon = pickOptimalCriticalExons(e1b5,e1a5,e2a3,e2b3,critical_exon)
splice_type = "alt-3'"
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
if e1a5_block < e1b5_block:
critical_exon = [e1b5]
try: add_to_for_terminal_exons[gene,e1b5_block].append(e1a5)
except KeyError: add_to_for_terminal_exons[gene,e1b5_block] = [e1a5]
complex3prime_event_db[gene,e1b5_block] = e1a5
try: cassette_exon_record[gene,e1b5_block].append(e2b3_block)
except KeyError: cassette_exon_record[gene,e1b5_block] = [e2b3_block]
elif e1a5_block != e1b5_block:
critical_exon = [e1a5]
try: add_to_for_terminal_exons[gene,e1a5_block].append(e1b5)
except KeyError: add_to_for_terminal_exons[gene,e1a5_block] = [e1b5]
complex3prime_event_db[gene,e1a5_block] = e1b5
try: cassette_exon_record[gene,e1a5_block].append(e2a3_block)
except KeyError: cassette_exon_record[gene,e1a5_block] = [e2a3_block]
splice_type = "cassette-exon"
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
if splice_type =='' and e1a5_block == e1b5_block and e1a5_block != e2a3_block and e1b5_block != e2b3_block:
#alt5'-cassette' [(((1, 1), (1, 1)*), ((4, 1)*, (4, 1))) ----(((1, 1), (1, 3)*), ((5, 1)*, (5, 1)))]
critical_exon = pickOptimalCriticalExons(e1b5,e1a5,e2a3,e2b3,critical_exon)
splice_type = "alt-5'"
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
if e2a3_block < e2b3_block:
critical_exon = [e2a3]
try: add_to_for_terminal_exons[gene,e2a3_block].append(e2b3)
except KeyError: add_to_for_terminal_exons[gene,e2a3_block] = [e2b3]
complex5prime_event_db[gene,e2a3_block] = e2b3
try: cassette_exon_record[gene,e2a3_block].append(e1a5_block)
except KeyError: cassette_exon_record[gene,e2a3_block] = [e1a5_block]
elif e2a3_block != e2b3_block:
critical_exon = [e2b3]
try: add_to_for_terminal_exons[gene,e2b3_block].append(e2a3)
except KeyError: add_to_for_terminal_exons[gene,e2b3_block] = [e2a3]
complex5prime_event_db[gene,e2b3_block] = e2a3
try: cassette_exon_record[gene,e2b3_block].append(e1b5_block)
except KeyError: cassette_exon_record[gene,e2b3_block] = [e1b5_block]
splice_type = "cassette-exon"
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
if splice_type =='' and e1a5_block<e1b5_block and e2b3_block>e2a3_block and e2a3_block>e1b5_block:
#mx-mix [(((1, 1), (1, 1)*), ((4, 1)*, (4, 1))) ----(((2, 1), (2, 1)*), ((5, 1)*, (5, 1)))]
critical_exon = [e2a3,e1b5]#; mx_event_db[gene,e2a3] = e1b5; mx_event_db[gene,e1b5] = e2a3
splice_type = 'cassette-exon'
#"""
try: add_to_for_terminal_exons[gene,e2a3_block].append(e2b3)
except KeyError: add_to_for_terminal_exons[gene,e2a3_block] = [e2b3]
try: add_to_for_terminal_exons[gene,e1b5_block].append(e1a5)
except KeyError: add_to_for_terminal_exons[gene,e1b5_block] = [e1a5]
#"""
try: cassette_exon_record[gene,e2a3_block].append(e1a5_block)
except KeyError: cassette_exon_record[gene,e2a3_block] = [e1a5_block]
try: cassette_exon_record[gene,e1b5_block].append(e2b3_block)
except KeyError: cassette_exon_record[gene,e1b5_block] = [e2b3_block]
#print 'mx-mx',critical_exon, splice_junctions
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
#print splice_type,critical_exon, gene
if splice_type =='' and e1a5_block<e1b5_block and e2a3_block>e2b3_block:
#(((2, 1), (2, 1)), ((9, 6), (9, 9))) (((4, 2), (4, 4)), ((7, 1), (7, 1))) ###one junction inside another
splice_type = "cassette-exon"; critical_exon = [e1b5,e2b3]
#"""
try: add_to_for_terminal_exons[gene,e2b3_block].append(e2a3)
except KeyError: add_to_for_terminal_exons[gene,e2b3_block] = [e2a3]
try: add_to_for_terminal_exons[gene,e1b5_block].append(e1a5)
except KeyError: add_to_for_terminal_exons[gene,e1b5_block] = [e1a5]
try: cassette_exon_record[gene,e2b3_block].append(e1b5_block)
except KeyError: cassette_exon_record[gene,e2b3_block] = [e1b5_block]
try: cassette_exon_record[gene,e1b5_block].append(e2b3_block)
except KeyError: cassette_exon_record[gene,e1b5_block] = [e2b3_block]
#"""
y = CriticalExonInfo(gene,critical_exon,splice_type,splice_junctions)
data.write(gene+'\t'+reformatJunctions(critical_exon,'exon')+'\t'+splice_junction_str+'\t'+splice_type+'\n')
try: critical_exon_db[gene].append(y)
except KeyError: critical_exon_db[gene] = [y]
data.close()
###Determine unique splice events and improve the annotations
if export_annotation != '_de-novo':
print len(critical_exon_db), 'genes identified from Ensembl, with alternatively regulated junctions'
cassette_exon_record = eliminate_redundant_dict_values(cassette_exon_record)
"""for i in cassette_exon_record:
print i, cassette_exon_record[i]"""
add_to_for_terminal_exons = eliminate_redundant_dict_values(add_to_for_terminal_exons)
###Store all region information in a dictionary for efficient recall
global region_db; region_db={}
for gene in exon_regions:
for rd in exon_regions[gene]:
try: region_db[gene,rd.ExonRegionNumbers()]=rd
except AttributeError: print gene, rd;kill
if len(exon_regions) == 0:
        critical_exon_db_original = copy.deepcopy(critical_exon_db) ### gets modified somehow below
        #critical_exon_db_original = manualDeepCopy(critical_exon_db) ### won't work because it is the object itself that is changed
alternative_exon_db={}; critical_junction_db={}; critical_gene_junction_db={}
alternative_terminal_exon={}
for gene in critical_exon_db:
critical_exon_junctions={}; critical_exon_splice_type={}
for sd in critical_exon_db[gene]:
for critical_exon in sd.CriticalExonRegion():
try: critical_exon_junctions[critical_exon]+=sd.Junctions()
except KeyError: critical_exon_junctions[critical_exon]=sd.Junctions()
try: critical_exon_splice_type[critical_exon].append(sd.SpliceType())
except KeyError: critical_exon_splice_type[critical_exon]=[sd.SpliceType()]
for junction in sd.Junctions():
                try: critical_junction_db[tuple(junction)].append(sd.SpliceType())
                except KeyError: critical_junction_db[tuple(junction)]=[sd.SpliceType()]
critical_exon_junctions = eliminate_redundant_dict_values(critical_exon_junctions)
critical_exon_splice_type = eliminate_redundant_dict_values(critical_exon_splice_type)
for critical_exon in critical_exon_junctions:
cj = critical_exon_junctions[critical_exon]
splice_events = critical_exon_splice_type[critical_exon]
status = 'stop'
#print splice_events,critical_exon
if splice_events == ['cassette-exon'] or ((gene,critical_exon[0]) in complex3prime_event_db) or ((gene,critical_exon[0]) in complex5prime_event_db):
exons_blocks_joined_to_critical = cassette_exon_record[gene,critical_exon[0]]
cassette_status = check_exon_polarity(critical_exon[0],exons_blocks_joined_to_critical)
if len(critical_exon_junctions[critical_exon])<3 or cassette_status == 'no': ###Thus, it is not supported by 3 independent junctions
if len(exons_blocks_joined_to_critical)<2 or cassette_status == 'no':
if cj[0][1] == cj[1][1]:
splice_events = ['alt-N-term']
second_critical_exon = add_to_for_terminal_exons[gene,critical_exon[0]]; status = 'add_another'
alternative_terminal_exon[gene,critical_exon] = 'alt-N-term'
elif cj[0][0] == cj[1][0]:
splice_events = ['alt-C-term']
second_critical_exon = add_to_for_terminal_exons[gene,critical_exon[0]]; status = 'add_another'
alternative_terminal_exon[gene,critical_exon] = 'alt-C-term'
else:
if critical_exon == cj[0][1]:
splice_events = ['alt-C-term'] ###this should be the only alt-exon
alternative_terminal_exon[gene,critical_exon] = 'alt-C-term'
elif (gene,critical_exon[0]) in complex3prime_event_db:
#print '3prime',splice_events,critical_exon
if (gene,critical_exon[0]) in add_to_for_terminal_exons:
#print critical_exon,len(cassette_exon_record[gene,critical_exon[0]]),cassette_exon_record[gene,critical_exon[0]];kill
if len(exons_blocks_joined_to_critical)<2 or cassette_status == 'no':
second_critical_exon = add_to_for_terminal_exons[gene,critical_exon[0]]
splice_events = ['alt-N-term']; status = 'add_another'
alternative_terminal_exon[gene,critical_exon] = 'alt-N-term'
elif (gene,critical_exon[0]) in complex5prime_event_db:
#print '5prime',splice_events,critical_exon
if (gene,critical_exon[0]) in add_to_for_terminal_exons:
#print critical_exon,len(cassette_exon_record[gene,critical_exon[0]]),cassette_exon_record[gene,critical_exon[0]];kill
if len(exons_blocks_joined_to_critical)<2 or cassette_status == 'no':
second_critical_exon = add_to_for_terminal_exons[gene,critical_exon[0]]
splice_events = ['alt-C-term']; status = 'add_another'
alternative_terminal_exon[gene,critical_exon] = 'alt-C-term'
"""if 'mx-mx' in splice_events and (gene,critical_exon) in mx_event_db:
###if one exon is a true cassette exon, then the mx-mx is not valid
if (gene,critical_exon[0]) in add_to_for_terminal_exons:
second_critical_exon = add_to_for_terminal_exons[gene,critical_exon[0]]
#print gene,critical_exon,second_critical_exon;kill"""
splice_events = string.join(splice_events,'|'); exon_junction_str_list=[]
splice_events = string.replace(splice_events, 'cassette-exons','cassette-exon(s)')
###if the junctions comprising the splice event for an alt-cassette show evidence of multiple exons, annotate as such
if "alt5'-cassette" in splice_events: #or "cassette-alt3'"
for junction in cj:
splicing_annotations = critical_junction_db[tuple(junction)]
if 'cassette-exons' in splicing_annotations:
splice_events = string.replace(splice_events, "alt5'-cassette","alt5'-cassette(s)"); break
if "cassette-alt3'" in splice_events: #or "cassette-alt3'"
for junction in cj:
splicing_annotations = critical_junction_db[tuple(junction)]
if 'cassette-exons' in splicing_annotations:
splice_events = string.replace(splice_events, "cassette-alt3'","cassette(s)-alt3'"); break
###Currently, 'cassette-exon(s)' is redundant with "alt5'-cassette(s)" or "cassette(s)-alt3'", so simplify
if "alt5'-cassette(s)" in splice_events and 'cassette-exon(s)' in splice_events:
splice_events = string.replace(splice_events, 'cassette-exon(s)','')
if "cassette(s)-alt3'" in splice_events and 'cassette-exon(s)' in splice_events:
splice_events = string.replace(splice_events, 'cassette-exon(s)','')
splice_events = string.replace(splice_events, '||','|')
for j in cj:
nj=[]
for exon in j: e = 'E'+str(exon[0])+'.'+str(exon[1]); nj.append(e)
try: critical_gene_junction_db[gene].append(nj)
except KeyError: critical_gene_junction_db[gene] = [nj]
nj = string.join(nj,'-')
exon_junction_str_list.append(nj)
exon_junction_str = string.join(exon_junction_str_list,'|')
try:
rd = region_db[gene,critical_exon] ###('ENSG00000213588', (26, 1)) and ('ENSG00000097007', (79, 1)) didn't work
except KeyError:
                ###Occurs as a result of either exons or transcripts with sketchy or complex assignments
null = []
try:
se = rd.AssociatedSplicingEvent()
if len(se)>1:
if splice_events not in se: se = se+'|'+ splice_events
else: se = splice_events
rd.setSpliceData(se,exon_junction_str)
#print critical_exon,se,exon_junction_str,gene
if status == 'add_another':
for critical_exon in second_critical_exon:
rd = region_db[gene,critical_exon]
se = rd.AssociatedSplicingEvent()
if len(se)>1:
if splice_events not in se:
if se != 'cassette-exon': se = se+'|'+ splice_events
else: se = splice_events
rd.setSpliceData(se,exon_junction_str)
#print critical_exon, se, exon_junction_str, gene,'second'
"""
###create an index for easy searching of exon content in downstream modules
critical_exon_block = critical_exon[0]
if gene in alternative_exon_db:
block_db = alternative_exon_db[gene]
try: block_db[critical_exon_block].append(rd)
except KeyError: block_db[critical_exon_block] = [rd]
else:
block_db = {}; block_db[critical_exon_block]=[rd]
alternative_exon_db[gene]=block_db"""
except Exception: null=[] ### Occurs when analyzing novel junctions, rather than Ensembl
#alternative_terminal_exon[gene,critical_exon] = 'alt-C-term'
    ###Since setSpliceData will update the existing instance, we can just re-order the region db for easy searching in downstream modules
### (note: the commented out code above could be useful for exon-structure output)
for gene in exon_regions:
block_db = {}
for rd in exon_regions[gene]:
try: block_db[rd.ExonNumber()].append(rd)
except KeyError: block_db[rd.ExonNumber()] = [rd]
exon_regions[gene] = block_db ###Replace the existing list with a dictionary for faster look-ups
if len(exon_regions)==0: exon_regions = critical_exon_db_original ### See JunctionArray.inferJunctionComps()
return exon_regions,critical_gene_junction_db
def manualDeepCopy(db):
### Same as deep copy, possibly less memory intensive
db_copy={}
for i in db:
db_copy[i] = list(tuple(db[i]))
return db_copy
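### Illustrative note (not from the original code): manualDeepCopy only rebuilds
### the top-level lists, so objects inside those lists remain shared with the source:
### >>> db = {'g1': [[1, 2]]}; db2 = manualDeepCopy(db)
### >>> db2['g1'].append([3, 4])   # does not alter db['g1']
### >>> db2['g1'][0].append(9)     # DOES alter db['g1'][0] (the inner list is shared)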
def check_exon_polarity(critical_exon_block,exons_blocks_joined_to_critical):
g=0;l=0
for joined_exon_blocks in exons_blocks_joined_to_critical:
if joined_exon_blocks>critical_exon_block: g+=1
if joined_exon_blocks<critical_exon_block: l+=1
if g>0 and l>0: cassette_status = 'yes'
else: cassette_status = 'no'
return cassette_status
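### Illustrative sketch (hypothetical block numbers): a critical exon only counts as a
### true cassette when junctions join it from both sides of its block.
### >>> check_exon_polarity(5, [3, 7])
### 'yes'   # joined blocks flank the critical block (3 < 5 and 7 > 5)
### >>> check_exon_polarity(5, [6, 7])
### 'no'    # all joined blocks lie on one side of the critical block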
def pickOptimalCriticalExons(e1b5,e1a5,e2a3,e2b3,critical_exon):
e1a5_block,e1a5_reg = e1a5
e2a3_block,e2a3_reg = e2a3
e1b5_block,e1b5_reg = e1b5
e2b3_block,e2b3_reg = e2b3
if e1a5_block == e1b5_block:
if e1a5_reg < e1b5_reg: critical_exon += [e1b5]
elif e1a5_reg != e1b5_reg: critical_exon += [e1a5]
if e2a3_block == e2b3_block:
if e2a3_reg < e2b3_reg: critical_exon += [e2a3]
elif e2a3_reg != e2b3_reg: critical_exon += [e2b3]
return critical_exon
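### Worked example (hypothetical (block,region) tuples): when the two 5' exons share a
### block, the junction-proximal region (higher region number) is kept; for a shared 3'
### block, the lower region number is kept.
### >>> pickOptimalCriticalExons((1,1),(1,2),(2,1),(2,2),[])
### [(1, 2), (2, 1)]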
def customDBDeepCopy(db):
db2={}
for i in db:
for e in db[i]:
try: db2[i].append(e)
except KeyError: db2[i]=[e]
return db2
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def importExonRegionCoordinates(species):
""" Used to export Transcript-ExonRegionIDs """
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
fn=filepath(filename); x=0
exon_region_coord_db={}
all_coord_db={}
exon_region_db={}
strand_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x+=1
else:
gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t
start_end = [start, stop]; start_end.sort()
all_coord_db[gene,exonid] = start_end
try:
### start and stop should be unique
db = exon_region_coord_db[gene]
                db['s',float(start)] = exonid ### a region's start can equal another region's stop - designate as start
                db['e',float(stop)] = exonid ### a region's stop can equal another region's start - designate as end
except Exception:
db={}
db['s',float(start)] = exonid
db['e',float(stop)] = exonid
exon_region_coord_db[gene] = db
#if 'I' not in exonid: ### if it spans an intron, include it
try: exon_region_db[gene].append((exonid,start))
except Exception: exon_region_db[gene] = [(exonid,start)]
strand_db[gene] = strand
return exon_region_coord_db, all_coord_db, exon_region_db, strand_db
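### Shape of the returned structures (hypothetical values, for orientation only):
### exon_region_coord_db = {'ENSG...': {('s',1000.0): 'E1.1', ('e',1200.0): 'E1.1', ...}}
### all_coord_db = {('ENSG...','E1.1'): ['1000','1200']} (sorted start/stop strings)
### exon_region_db = {'ENSG...': [('E1.1','1000'), ('E2.1','1500'), ...]}
### strand_db = {'ENSG...': '+'}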
def exportTranscriptExonIDAssociations(species):
""" Determine the exon region ID composition of all analyzed mRNAs """
try: relative_exon_locations = importEnsExonStructureDataSimpler(species,'ucsc',{})
except Exception: relative_exon_locations={}
relative_exon_locations = importEnsExonStructureDataSimpler(species,'ensembl',relative_exon_locations)
from build_scripts import IdentifyAltIsoforms
seq_files, transcript_protein_db = IdentifyAltIsoforms.importProteinSequences(species,just_get_ids=True) ### get mRNA-protein IDs
exon_region_coord_db, all_coord_db, exon_region_db, strand_db = importExonRegionCoordinates(species)
export_file = 'AltDatabase/ensembl/'+species+'/mRNA-ExonIDs.txt'
export_data = export.ExportFile(export_file)
transcript_region_db={}
transcripts_with_missing_regions={}
errorCount=0
retainedIntrons=0
for key in relative_exon_locations: ### From UCSC or Ensembl transcript coordinates only
all_exon_intron_regions = {}
ens_transcriptid,gene,strand = key
#if ens_transcriptid != 'AK185721': continue
region_db={} ### track the coordinates for cleaning up the order
coord_db = exon_region_coord_db[gene]
for (region,start) in exon_region_db[gene]:
all_exon_intron_regions[region] = all_coord_db[gene,region]
region_db[region] = start
for exon_data in relative_exon_locations[key]: ### each transcript exon
regions=[]
exon_start,exon_end,ens_exonid = exon_data
start_end = [exon_start,exon_end]; start_end.sort(); start,end = start_end
partial_matches=[]
added=False
for region in all_exon_intron_regions: ### search each transcript exon against every annotated region
e5,e3 = all_exon_intron_regions[region]
annotated = [int(e5),int(e3)]
annotated.sort()
coords = [start_end[0],start_end[1]]+annotated
coords.sort()
if coords[0] == start_end[0] and coords[-1] == start_end[-1]:
### Hence, the exon/intron region is contained within or is equal to the transcript exon
if (gene,ens_transcriptid) in transcript_region_db:
if region not in transcript_region_db[gene,ens_transcriptid]:
transcript_region_db[gene,ens_transcriptid].append((region))
else:
transcript_region_db[gene,ens_transcriptid] = [region]
added=True
retainedIntrons+=1
else:
if annotated[0] == start_end[0] or annotated[-1] == start_end[-1]:
#print exon_start,exon_end, start_end, region, ens_transcriptid
partial_matches.append(region)
if added ==False:
if len(partial_matches)>0:
                    ### Typically occurs when a UCSC exon has longer or shorter 3' or 5'UTR exon boundaries
for region in partial_matches:
if (gene,ens_transcriptid) in transcript_region_db:
if region not in transcript_region_db[gene,ens_transcriptid]:
transcript_region_db[gene,ens_transcriptid].append((region))
else:
transcript_region_db[gene,ens_transcriptid] = [region]
else:
transcripts_with_missing_regions[ens_transcriptid]=None
errorCount+=1
#if errorCount<100: print 'Error:',key, exon_start,exon_end,ens_exonid
if (gene,ens_transcriptid) in transcript_region_db:
regions = transcript_region_db[gene,ens_transcriptid]
regions_sort=[]
for region in regions:
try: start = region_db[region]
except Exception,e: print e, coord_db;sys.exit()
regions_sort.append([start,region])
regions_sort.sort()
if strand_db[gene] == '-':regions_sort.reverse()
transcript_region_db[gene,ens_transcriptid] = map(lambda (s,r): r, regions_sort)
#print gene, ens_transcriptid, transcript_region_db[gene,ens_transcriptid];sys.exit()
print len(transcripts_with_missing_regions), 'transcripts with missing exon regions out of', len(transcript_region_db)+len(transcripts_with_missing_regions)
t1=[]
for i in transcripts_with_missing_regions:
t1.append(i)
print 'missing:',t1[:15]
exon_db={}
gene_region_db={}
for (gene,transcript) in transcript_region_db:
try: proteinAC = transcript_protein_db[transcript]
except Exception: proteinAC = 'None'
try: regions = string.join(transcript_region_db[gene,transcript],'|')
except Exception: print gene, transcript, transcript_region_db[gene,transcript];sys.exit()
gene_region_db[gene,regions]=[]
export_data.write(string.join([gene,transcript,proteinAC,regions],'\t')+'\n')
for exonID in transcript_region_db[gene,transcript]:
exon_db[gene+':'+exonID]=None
export_data.close()
print 'Unique-transcripts by regionID makeup:',len(gene_region_db) ### Complete UCSC gives 272071 versus 237607 for the non-Complete
filterExonRegionSeqeunces(exon_db,species)
def filterExonRegionSeqeunces(exon_db,species):
filename = 'AltDatabase/'+species+'/RNASeq/RNASeq_critical-exon-seq_updated.txt'
export_file = 'AltDatabase/'+species+'/RNASeq/RNASeq_critical-exon-seq_filtered.txt'
print 'importing', filename
fn=filepath(filename)
export_data = export.ExportFile(export_file)
for line in open(fn,'r').xreadlines():
data = line.strip()
t = string.split(data,'\t')
exonID = t[0]; sequence = t[-1]
try:
y = exon_db[exonID]
export_data.write(line)
except Exception: null=[] ### Occurs if there is no Ensembl for the critical exon or the sequence is too short to analyze
export_data.close()
def createExonRegionSequenceDB(species,platform):
""" Store the filtered exon sequence data in an SQL database for faster retreival """
start=time.time()
import SQLInterface
DBname = 'ExonSequence'
schema_text ='''-- Schema for species specific AltAnalyze transcript data.
    -- ExonSeq stores the filtered critical exon sequence for each Ensembl gene
create table ExonSeq (
uid text primary key,
gene text,
sequence text
);
'''
    conn = SQLInterface.populateSQLite(species,platform,DBname,schema_text=schema_text) ### conn is the database connection interface
### Populate the database
filename = 'AltDatabase/'+species+'/RNASeq/RNASeq_critical-exon-seq_filtered.txt'
print 'importing', filename
fn=filepath(filename)
for line in open(fn,'r').xreadlines():
data = line.strip()
t = string.split(data,'\t')
exonID = t[0]; sequence = t[-1]
gene,region = string.split(exonID,':')
#print exonID,gene,sequence
### Store this data in the SQL database
command = """insert into ExonSeq (uid, gene, sequence)
values ('%s', '%s','%s')""" % (exonID,gene,sequence)
conn.execute(command)
conn.commit() ### Needed to commit changes
conn.close()
time_diff = str(round(time.time()-start,1))
print 'Exon Region Sequences added to SQLite database in %s seconds' % time_diff
def importTranscriptExonIDs(species):
start=time.time()
filename = 'AltDatabase/ensembl/'+species+'/mRNA-ExonIDs.txt'
fn=filepath(filename)
gene_transcript_structure={}
protein_db = {}
for line in open(fn,'r').xreadlines():
data = line.strip()
gene,transcript,proteinAC,regions = string.split(data,'\t')
if gene in gene_transcript_structure:
tdb=gene_transcript_structure[gene]
tdb[transcript] = regions
else:
tdb={}
tdb[transcript] = regions+'|'
gene_transcript_structure[gene] = tdb
protein_db[proteinAC] = transcript
time_diff = str(round(time.time()-start,1))
#print 'Transcript-ExonID associations imported in %s seconds' % time_diff
return gene_transcript_structure, protein_db
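### Example mRNA-ExonIDs.txt row (tab-delimited; hypothetical IDs):
###   ENSG00000123456  ENST00000234567  ENSP00000345678  E1.1|E2.1|E3.2
### yields gene_transcript_structure['ENSG00000123456']['ENST00000234567'] and
### protein_db['ENSP00000345678'] == 'ENST00000234567'. Note the quirk above: only the
### first transcript stored for a gene gets a trailing '|' appended to its region string.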
def identifyPCRregions(species,platform,uid,inclusion_junction,exclusion_junction,isoform1,isoform2):
#print uid,inclusion_junction,exclusion_junction,isoform1,isoform2;sys.exit()
try:
x = len(gene_transcript_structure)
print_outs = False
except Exception:
gene_transcript_structure, protein_db = importTranscriptExonIDs(species)
print_outs = True
gene,region = string.split(uid,':')
isoform_db = copy.deepcopy(gene_transcript_structure[gene])
import SQLInterface
conn = SQLInterface.connectToDB(species,platform,'ExonSequence')
ids = [gene]
query = "select uid, sequence from ExonSeq where gene = ?"
uid_sequence_list = SQLInterface.retreiveDatabaseFields(conn,ids,query)
ex1,ex2 = string.split(exclusion_junction,'-')
ex1b,ex2b = string.split(inclusion_junction,'-')
exon_seq_db={}
for (uid1,seq) in uid_sequence_list:
exon_seq_db[uid1] = seq
try:
print '('+ex1+')'+exon_seq_db[gene+':'+ex1]
print '('+ex2+')'+exon_seq_db[gene+':'+ex2]
print '('+ex1b+')'+exon_seq_db[gene+':'+ex1b]
print '('+ex2b+')'+exon_seq_db[gene+':'+ex2b]
except Exception:
pass
if print_outs == True:
#"""
for (uid1,seq) in uid_sequence_list:
if uid1 == uid:
print seq
#"""
try:
mRNA1 = protein_db[isoform1]
mRNA1_s = isoform_db[mRNA1]
except Exception:
mRNA1_s = string.replace(inclusion_junction,'-','|')
try:
mRNA2 = protein_db[isoform2]
mRNA2_s = isoform_db[mRNA2]
except Exception:
mRNA2_s = string.replace(exclusion_junction,'-','|')
print ex1,ex2
print [mRNA1_s]
print [mRNA2_s]
if mRNA1_s != None:
        ex1_pos = string.find(mRNA1_s,ex1+'|') ### This is the location in the string where the exclusion junction starts
ex1_pos = ex1_pos+1+string.find(mRNA1_s[ex1_pos:],'|') ### This is the location in the string where the inclusion exon starts
ex2_pos = string.find(mRNA1_s,ex2+'|')-1 ### This is the location in the string where the inclusion exon ends
print ex2_pos, ex1_pos
if ex2_pos<ex1_pos:
if mRNA2_s != None:
mRNA1_s = mRNA2_s
#mRNA1 = mRNA2
                ex1_pos = string.find(mRNA1_s,ex1+'|') ### This is the location in the string where the exclusion junction starts
ex1_pos = ex1_pos+1+string.find(mRNA1_s[ex1_pos:],'|') ### This is the location in the string where the inclusion exon starts
ex2_pos = string.find(mRNA1_s,ex2+'|')-1 ### This is the location in the string where the inclusion exon ends
if abs(ex1_pos-ex2_pos)<2:
### Incorrect isoform assignments resulting in faulty primer design
if ex1b+'|' in mRNA2_s and ex2b+'|' in mRNA2_s:
ex1 = ex1b
ex2 = ex2b
                ex1_pos = string.find(mRNA1_s,ex1+'|') ### This is the location in the string where the exclusion junction starts
ex1_pos = ex1_pos+1+string.find(mRNA1_s[ex1_pos:],'|') ### This is the location in the string where the inclusion exon starts
ex2_pos = string.find(mRNA1_s,ex2+'|')-1 ### This is the location in the string where the inclusion exon ends
inclusion_exons = string.split(mRNA1_s[ex1_pos:ex2_pos],'|') ### These are the missing exons from the exclusion junction (in between)
#if '-' in mRNA1_s:
common_exons5p = string.split(mRNA1_s[:ex1_pos-1],'|') ### Flanking full 5' region (not just the 5' exclusion exon region)
        if (ex2_pos+1) == -1: ### ex2 was not found by string.find (returned -1), hence the 3' exon is the last exon in the mRNA
common_exons3p = [string.split(mRNA1_s,'|')[-1]]
inclusion_exons = string.split(mRNA1_s[ex1_pos:],'|')[:-1]
else:
common_exons3p = string.split(mRNA1_s[ex2_pos+1:],'|') ### Flanking full 3' region (not just the 3' exclusion exon region)
if gene == 'E1NSG00000205423':
#print mRNA1_s, ex2;sys.exit()
#print mRNA1_s;sys.exit()
print uid,inclusion_junction,exclusion_junction,isoform1,isoform2
print inclusion_exons
print common_exons5p, common_exons3p
print mRNA1_s, ex2_pos, ex1, ex2
sys.exit()
inclusion_exons = map(lambda x: gene+':'+x, inclusion_exons) ### add geneID prefix
common_exons5p = map(lambda x: gene+':'+x, common_exons5p) ### add geneID prefix
common_exons3p = map(lambda x: gene+':'+x, common_exons3p) ### add geneID prefix
inclusion_junction = string.replace(inclusion_junction,'-','|')+'|'
exclusion_junction = string.replace(exclusion_junction,'-','|')+'|'
#if inclusion_junction in mRNA1_s: print '1 true'
#if exclusion_junction in mRNA2_s: print '2 true'
#sys.exit()
uid_seq_db={} ### convert list to dictionary
for (uid,seq) in uid_sequence_list:
uid_seq_db[uid] = seq
#E213.1-E214.1 vs. E178.1-E223.1
if uid == 'ENSG00000145349:E13.1': print uid, seq, len(seq)
if uid == 'ENSG00000145349:E12.1': print uid, seq, len(seq)
if uid == 'ENSG00000145349:E19.1': print uid, seq, len(seq)
if uid == 'ENSG00000145349:E15.2': print uid, seq, len(seq)
if uid == 'ENSG00000145349:E18.1': print uid, seq, len(seq)
#sys.exit()
### Get the common flanking and inclusion transcript sequence
#print common_exons5p,common_exons3p;sys.exit()
print 1
common_5p_seq = grabTranscriptSeq(common_exons5p,uid_seq_db)
print 2
common_3p_seq = grabTranscriptSeq(common_exons3p,uid_seq_db)
print 3
inclusion_seq = grabTranscriptSeq(inclusion_exons,uid_seq_db)
print 'common_5p_seq:',[common_5p_seq]
print 'common_3p_seq:',[common_3p_seq]
print 'inclusion_seq:',[inclusion_seq], inclusion_exons
incl_isoform_seq = common_5p_seq+inclusion_seq+common_3p_seq
incl_isoform_seq_formatted = common_5p_seq[-100:]+'['+inclusion_seq+']'+common_3p_seq[:100]
excl_isoform_seq = common_5p_seq+common_3p_seq
c5pl=len(common_5p_seq)
c3pl=len(common_3p_seq)
ipl1=len(inclusion_seq)
il=len(incl_isoform_seq)
# Metrics to designate minimal search regions for primer3 > release 2.3
if c5pl>100: s1 = c5pl-100; e1 = 100 ### start looking for primers in the region (forward)
else: s1 = 0; e1 = c5pl
#if c3pl>200: s2 = c5pl+ipl1; e2 = 200
if c3pl>100: s2 = c5pl+ipl1; e2 = 100
else: s2 = c5pl+ipl1; e2 = c3pl
        #include_region = [s1,s2+e2] ### superseded by the broader definition below
        include_region = [s1,e1+ipl1+e2]
include_region = map(lambda x: str(x), include_region)
target_region = [c5pl,ipl1]
target_region = map(lambda x: str(x), target_region)
input_dir = filepath('AltDatabase/primer3/temporary-files/temp1.txt')
output_file = filepath('AltDatabase/primer3/temporary-files/output1.txt')
try: os.remove(input_dir)
except Exception: pass
try: os.remove(output_file)
except Exception: pass
#print incl_isoform_seq
#print include_region
#print target_region;sys.exit()
input_dir = exportPrimerInputSeq(incl_isoform_seq,include_region,target_region)
primer3_file = getPrimer3Location()
#for Primer3 release 2.3 and greater: SEQUENCE_PRIMER_PAIR_OK_REGION_LIST=100,50,300,50 ; 900,60,, ; ,,930,100
#Left primer in the 50 bp region starting at position 100 AND right primer in the 50 bp region starting at position 300
### Run Primer3
command_line = [primer3_file, "<",'"'+input_dir+'"', ">",'"'+output_file+'"'] #-format_output
#command_line = [primer3_file, "-format_output <",'"'+input_dir+'"', ">",'"'+output_file+'"']
command_line = string.join(command_line,' ')
#print [command_line];sys.exit()
        retcode = os.popen(command_line) ### os.popen returns a pipe object, not a return code; Primer3 runs in the background
        time.sleep(4) ### crude wait to give Primer3 time to finish writing the output file
"""
commandFinshed = False
while commandFinshed == False:
try: commandFinshed = checkFileCompletion(output_file)
except Exception,e: pass
"""
left_primer,right_primer,amplicon_size = importPrimer3Output(output_file,gene)
primers = 'F:'+left_primer+' R:'+right_primer+' sizes:'+str(amplicon_size)+'|'+str(amplicon_size-ipl1) + ' (inclusion-isoform:%s)' % incl_isoform_seq_formatted
right_primer_sense = reverse_orientation(right_primer)
if left_primer in excl_isoform_seq:
print 'Left',
else: kill
if right_primer_sense in excl_isoform_seq:
print 'Right'
else: kill
#if print_outs:
print primers
return primers
def checkFileCompletion(fn):
complete = False
for line in open(fn,'r').xreadlines():
if 'PRIMER_PRODUCT_SIZE' in line: complete = True
return complete
def grabTranscriptSeq(exons,uid_seq_db):
seq=''
#print exons
for uid in exons:
try: seq += uid_seq_db[uid]
except Exception: break ### MISSING SEQUENCE FROM THE DATABASE - OFTEN OCCURS IN THE LAST FEW EXONS - UNCLEAR WHY
return seq
def exportPrimerInputSeq(sequence,include_region,target_region):
tempdir = filepath('AltDatabase/primer3/temporary-files/temp1.txt')
export_obj = export.ExportFile(tempdir)
export_obj.write('PRIMER_SEQUENCE_ID=temp1\nSEQUENCE=')
export_obj.write(sequence+'\n')
export_obj.write('INCLUDED_REGION=')
export_obj.write(string.join(include_region,',')+'\n')
export_obj.write('PRIMER_PRODUCT_SIZE_RANGE=75-1000\n')
export_obj.write('TARGET=')
export_obj.write(string.join(target_region,',')+'\n')
export_obj.write('=')
export_obj.close()
return tempdir
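### The record written above is the classic Primer3 Boulder-IO input format, e.g.
### (hypothetical sequence and coordinates):
###   PRIMER_SEQUENCE_ID=temp1
###   SEQUENCE=ACGTACGT...ACGT
###   INCLUDED_REGION=0,420
###   PRIMER_PRODUCT_SIZE_RANGE=75-1000
###   TARGET=180,60
###   =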
def importPrimer3Output(fn,gene):
gene_transcript_structure={}
protein_db = {}
for line in open(fn,'r').xreadlines():
data = line.strip()
if 'PRIMER_LEFT_SEQUENCE=' in data:
left_primer = string.split(data,'PRIMER_LEFT_SEQUENCE=')[-1]
#print left_primer;
#print fn; sys.exit()
if 'PRIMER_RIGHT_SEQUENCE=' in data:
right_primer = string.split(data,'PRIMER_RIGHT_SEQUENCE=')[-1]
if 'PRIMER_PRODUCT_SIZE=' in data:
amplicon_size = int(string.split(data,'PRIMER_PRODUCT_SIZE=')[-1])
break
return left_primer,right_primer,amplicon_size
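### The parser above expects Primer3 Boulder-IO output records such as (hypothetical
### primer sequences):
###   PRIMER_LEFT_SEQUENCE=ACACTGAGGACTGGTTCAG
###   PRIMER_RIGHT_SEQUENCE=TGGTCCTTGAAGGATGCAA
###   PRIMER_PRODUCT_SIZE=214
### Parsing stops at the first PRIMER_PRODUCT_SIZE entry; if a key is absent, the
### corresponding name is never bound and the caller's try/except absorbs the error.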
def getPrimer3Location():
primer3_dir = 'AltDatabase/primer3/'
if os.name == 'nt':
if '32bit' in architecture: primer3_file = primer3_dir + '/PC/32bit/primer3_core'; plat = 'Windows'
elif '64bit' in architecture: primer3_file = primer3_dir + '/PC/64bit/primer3_core'; plat = 'Windows'
elif 'darwin' in sys.platform: primer3_file = primer3_dir + '/Mac/primer3_core'; plat = 'MacOSX'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture(): primer3_file = primer3_dir + '/Linux/32bit/primer3_core'; plat = 'linux32bit'
elif '64bit' in platform.architecture(): primer3_file = primer3_dir + '/Linux/64bit/primer3_core'; plat = 'linux64bit'
primer3_file = filepath(primer3_file)
return primer3_file
def importComparisonSplicingData4Primers(filename,species):
fn=filepath(filename)
stringent_regulated_exons = {}
firstLine = True
global gene_transcript_structure
global protein_db
gene_transcript_structure, protein_db = importTranscriptExonIDs(species)
ei = export.ExportFile(filename[:-4]+'-primers.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
header = t
firstLine = False
ei.write(line)
else:
try:
if 'comparison' in header:
t = t[:-1]
if 'PSI' not in filename:
exonid = t[0]; symbol = t[2]; confirmed = t[5]; fold_change = abs(float(t[-2])); percent_exp = float(t[-3])
junctions = t[4]; isoforms = t[9]; splice_type = t[8]
else:
""" Update the code to work with PSI results files from the metaDataAnalysis script """
#UID,InclusionNotation,ClusterID,UpdatedClusterID,AltExons,EventAnnotation,Coordinates,ProteinPredictions,dPSI,rawp,adjp,avg1,avg2
uid = t[0]
print uid
uid_objects = string.split(uid,':')
symbol = uid_objects[0]
junctions = string.join(uid_objects[1:],':')
junctions = string.split(junctions,'|')
fold_change = abs(float(t[9]))
isoforms = t[8]
splice_type = t[6]
exonid = t[5]
#print symbol, junctions,fold_change,percent_exp,confirmed;sys.exit()
#if fold_change<2 and percent_exp>0.25 and (confirmed == 'yes'):
if fold_change < 50:
if 'alternative_polyA' not in splice_type and 'altPromoter' not in splice_type:
if len(junctions)==2:
j1, j2 = junctions
if 'ENS' in j1:
j1 = string.split(j1,':')[1]
j2 = string.split(j2,':')[1]
else:
j1, j2 = string.split(string.split(junctions,'|')[0],' vs. ')
#(-)alt-C-terminus,(-)AA:188(ENSP00000397452)->238(ENSP00000410667),(-)microRNA-target(hsa-miR-599:miRanda,hsa-miR-186:miRanda)
try:
iso1, iso2 = string.split(string.split(isoforms,'AA:')[1],')->')
iso1 = string.split(iso1,'(')[1]
iso2 = string.split(string.split(iso2,'(')[1],')')[0]
#print iso1, iso2
except:
iso1 = ''
iso2 = ''
#print j1, j2
#print symbol
try:
primer = identifyPCRregions(species,'RNASeq',exonid,j1,j2,iso1,iso2)
#print exonid, j1, j2, iso1, iso2
ei.write(string.join(t+[primer],'\t')+'\n')
#print primer, symbol, exonid
#sys.exit()
except Exception:
pass
print traceback.format_exc(),'\n'; #sys.exit()
#sys.exit()
except Exception:
#print traceback.format_exc(),'\n'#;sys.exit()
pass
ei.close()
if __name__ == '__main__':
    ###KNOWN PROBLEMS: the junction analysis program calls exons as cassette-exons if a new C-terminal exon occurs downstream of that exon in a different transcript (ENSG00000197991).
Species = 'Hs'
test = 'yes'
Data_type = 'ncRNA'
Data_type = 'mRNA'
#E6.1-E8.2 vs. E5.1-E8.3
filename = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Ichi/August.11.2017/Events-dPSI_0.0_rawp/PSI.R636S_Homo_vs_WTC-limma-updated-Domains2-filtered.txt'
#filename = '/Users/saljh8/Desktop/dataAnalysis/SalomonisLab/Leucegene/July-2017/PSI/Events-dPSI_0.1_adjp/PSI.U2AF1-like_vs_OthersQPCR.txt'
#filename = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Ichi/Combined-junction-exon-evidence.txt'
#exportTranscriptExonIDAssociations(Species);sys.exit()
#createExonRegionSequenceDB(Species,'RNASeq'); sys.exit()
importComparisonSplicingData4Primers(filename,Species); sys.exit()
#ENSG00000154556:E8.2-E9.7|ENSG00000154556:E5.12-E9.7 alt-N-terminus|- alt-C-terminus|- AA:41(ENST00000464975-PEP)->43(ENST00000493709-PEP)|-
#ENSG00000161999:E2.3-E2.5 nonsense_mediated_decay|+ retained_intron|+ alt-N-terminus|+ alt-C-terminus|+ AA:72(ENST00000564436-PEP)->81(ENSP00000454700)|+
#ENSG00000122591:E12.2-E13.1|ENSG00000122591:E12.1-E13.1 alt-N-terminus|+ alt-C-terminus|+ AA:116(ENST00000498833-PEP)->471(ENSP00000397168)|+
#ENSG00000128891:E1.11-E2.1|ENSG00000128891:E1.10-E2.1 alt-N-terminus|+ AA:185(ENSP00000350695)->194(ENSP00000452773)|+
identifyPCRregions(Species,'RNASeq','ENSG00000128891:E1.12','E1.12-E2.1','E1.11-E2.1','ENSP00000350695','ENSP00000350695'); sys.exit()
#identifyPCRregions(Species,'RNASeq','ENSG00000133226:E9.1','E8.1-E9.1','E8.1-E11.3','ENST00000564436-PEP','ENSP00000454700'); sys.exit()
#createExonRegionSequenceDB(Species,'RNASeq'); sys.exit()
getEnsemblAssociations(Species,Data_type,test); sys.exit()
test_gene = ['ENSG00000143776']#,'ENSG00000154889','ENSG00000156026','ENSG00000148584','ENSG00000063176','ENSG00000126860'] #['ENSG00000105968']
meta_test = ["ENSG00000215305","ENSG00000179676","ENSG00000170484","ENSG00000138180","ENSG00000100258","ENSG00000132170","ENSG00000105767","ENSG00000105865","ENSG00000108523","ENSG00000150045","ENSG00000156026"]
#test_gene = meta_test
gene_seq_filename = 'AltDatabase/ensembl/'+Species+'/'+Species+'_gene-seq-2000_flank'
gene_db = import_sequence_data(gene_seq_filename,{},Species,'gene_count'); print len(gene_db); sys.exit()
import_dir = '/AltDatabase/ensembl/'+species
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for file_name in dir_list: #loop through each file in the directory to output results
dir_file = 'AltDatabase/ensembl/'+species+'/'+file_name
if 'exon' in dir_file: exon_file = dir_file
elif 'transcript' in dir_file: trans_file = dir_file
elif 'gene' in dir_file: gene_seq_file = dir_file
elif 'Exon_cDNA' in dir_file: exon_trans_file = dir_file
elif 'Domain' in dir_file: domain_file = dir_file
#"""
exon_annotation_db,transcript_gene_db,gene_transcript,transcript_exon_db,intron_retention_db,ucsc_splicing_annot_db = getEnsExonStructureData(species,data_type)
#exon_db = customDBDeepCopy(exon_annotation_db)
#"""
exon_annotation_db2 = annotate_exons(exon_annotation_db)
#kill
exon_db2 = customDBDeepCopy(exon_annotation_db2) ##having problems with re-writting contents of this db when I don't want to
exon_clusters,intron_clusters,exon_regions,intron_region_db = exon_clustering(exon_db2); exon_db2={}
#"""
exon_junction_db,putative_as_junction_db,exon_junction_db = processEnsExonStructureData(exon_annotation_db,exon_regions,transcript_gene_db,gene_transcript,transcript_exon_db,intron_retention_db)
    kill ### intentional halt for testing (undefined name stops execution, matching the 'kill' idiom used below)
#ej = {};ej['ENSG00000124104'] = exon_junction_db['ENSG00000124104']
#exon_regions,critical_gene_junction_db = compareJunctions(putative_as_junction_db,exon_regions)
#exportSubGeneViewerData(exon_regions,critical_gene_junction_db,intron_region_db,intron_retention_db)
#ENSG00000149792 possible retained 3' intron... check out
kill
use_exon_data='no';get_splicing_factors = 'yes'
rna_processing_ensembl = GO_parsing.parseAffyGO(use_exon_data,get_splicing_factors,species)
ensembl_annot_file = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
ensembl_annotation_db = getEnsemblAnnotations(ensembl_annot_file,rna_processing_ensembl)
    ###Print overlap statistics for exon-blocks
x=0;y=0; m=0; l=0
for key in exon_clusters:
for exon_block in exon_clusters[key]:
if len(exon_block[2])>1: y += 1; x += 1; m += len(exon_block[2]); l += len(exon_block[2])
else: x += 1; m += 1
#if x < 50:
#print key[0], exon_block[2], len(exon_block[2]),x,y,m,l
print 'x',x,'y',y,'m',m,'l',l
"""
for gene in exon_regions:
db = exon_regions[gene]
for block in db:
for rd in db[block]:
try: print rd.AssociatedSplicingEvent(),rd.AssociatedSplicingJunctions();kill
except AttributeError: continue"""
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/EnsemblImport.py
|
EnsemblImport.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from build_scripts import EnsemblImport
import copy
import time
from build_scripts import alignToKnownAlt
import update
import export
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
################### Import exon coordinate/transcript data from BIOMART
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
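### Example (assuming unique.unique() de-duplicates, as its use throughout this
### module implies):
### >>> eliminate_redundant_dict_values({'g1': [3, 1, 3, 2]})
### {'g1': [1, 2, 3]}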
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def covertStrToListInt(str_data):
str_data = string.replace(str_data,'"','')
list = string.split(str_data,',');list2=[]
try:
for i in list[:-1]: list2.append(int(i))
except ValueError: print list;kill
return list2
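### Example (hypothetical UCSC coordinate string): the trailing comma yields an empty
### last element, which list[:-1] discards before int() conversion.
### >>> covertStrToListInt('"522,1014,2398,"')
### [522, 1014, 2398]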
def importEnsExonStructureData(species):
start_time = time.time(); global ensembl_annotations; global ensembl_exon_data; ensembl_exon_data={}; global ensembl_gene_coordinates
global ensembl_gene_exon_db; ensembl_gene_exon_db={}; global ensembl_exon_pairs; global coordinate_gene_count_db
global ensembl_transcript_structure_db; ensembl_transcript_structure_db={}; global ens_transcript_gene_db; ens_transcript_gene_db={}
global ensembl_const_exon_db; ensembl_const_exon_db = {}
###Simple function to import and organize exon/transcript data
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
fn=filepath(filename); ensembl_gene_coordinates={}; ensembl_annotations={}; x=0
transcript_exon_db = {}; initial_junction_db = {}; ensembl_transcript_exons={}; coordinate_gene_count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if 'Chromosome' in t[0]: type = 'old'###when using older builds of EnsMart versus BioMart
else: type = 'current'
x=1
else:
try: gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
except ValueError: print t;kill
###switch exon-start and stop if in the reverse orientation
if strand == '-1': strand = '-'
else: strand = '+'
if '_' in chr: c = string.split(chr,'_'); chr = c[0]
exon_end = int(exon_end); exon_start = int(exon_start)
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if 'ENS' in gene: ens_exonid_data = string.split(ens_exonid,'.'); ens_exonid = ens_exonid_data[0]
if test == 'yes':
if gene in test_gene: proceed = 'yes'
else: proceed = 'no'
else: proceed = 'yes'
if abs(exon_end-exon_start)>0 and proceed == 'yes':
                ###Create temporary databases storing just exon and just transcript data and then combine in the next block of code
try: ensembl_gene_coordinates[gene].append(exon_start)
except KeyError: ensembl_gene_coordinates[gene] = [exon_start]
try: coordinate_gene_count_db[gene].append([exon_start,exon_end])
except KeyError: coordinate_gene_count_db[gene] = [[exon_start,exon_end]]
ensembl_gene_coordinates[gene].append(exon_end)
ensembl_annotations[gene] = chr,strand
ensembl_exon_data[(chr,exon_start,exon_end)] = ens_exonid
try:ensembl_gene_exon_db[gene]+= [ens_exonid]
except KeyError: ensembl_gene_exon_db[gene] = [ens_exonid]
try: ensembl_transcript_exons[ens_transcriptid,strand].append((exon_start,ens_exonid))
except KeyError: ensembl_transcript_exons[ens_transcriptid,strand] = [(exon_start,ens_exonid)]
try: ensembl_transcript_structure_db[ens_transcriptid].append((chr,exon_start,exon_end))
except KeyError: ensembl_transcript_structure_db[ens_transcriptid] = [(chr,exon_start,exon_end)]
ens_transcript_gene_db[ens_transcriptid] = gene
ensembl_const_exon_db[ens_exonid] = constitutive_exon
end_time = time.time(); time_diff = int(end_time-start_time)
print filename,"parsed in %d seconds" % time_diff
###Sort exon info in the transcript to store pairs of Ensembl exons to find unique pairs of exons from UCSC
ensembl_exon_pairs={}
for (transcript,strand) in ensembl_transcript_exons:
a = ensembl_transcript_exons[(transcript,strand)]; a.sort()
if strand == '-': a.reverse()
index=0
try:
while index<len(a):
exon_pair = a[index][1],a[index+1][1]
ensembl_exon_pairs[exon_pair]=[]; index+=1
except IndexError: index+=1
ensembl_chr_coordinate_db={}
for gene in ensembl_gene_coordinates:
a = ensembl_gene_coordinates[gene]; a.sort()
gene_start = a[0]; gene_stop = a[-1]
chr,strand = ensembl_annotations[gene]
if chr in ensembl_chr_coordinate_db:
ensembl_gene_coordinates2 = ensembl_chr_coordinate_db[chr]
ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
else:
ensembl_gene_coordinates2={}; ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
ensembl_chr_coordinate_db[chr]=ensembl_gene_coordinates2
return ensembl_chr_coordinate_db
def makeUnique(item):
db1={}; list1=[]
for i in item: db1[i]=[]
for i in db1: list1.append(i)
list1.sort()
return list1
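### Note: for hashable elements this is equivalent to sorted(set(item)), e.g.
### >>> makeUnique([(3,4), (1,2), (3,4)])
### [(1, 2), (3, 4)]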
def mergeFragmentedExons(exon_coordiantes,strand):
###If the intron between two exons is < 10bp, make one exon
index=0; merged='no'
#print len(exon_coordiantes),exon_coordiantes,strand,'\n'
while index<(len(exon_coordiantes)-1):
deleted = 'no'
if strand == '-':
(stop1,start1) = exon_coordiantes[index]
(stop2,start2) = exon_coordiantes[index+1]
else:
(start1,stop1) = exon_coordiantes[index]
(start2,stop2) = exon_coordiantes[index+1]
if abs(start2-stop1)<9:
if strand == '+': new_exon = (start1,stop2)
else: new_exon = (stop2,start1)###for neg_strand
exon_coordiantes[index+1] = new_exon; del exon_coordiantes[index]
deleted = 'yes'; merged = 'yes'
index+=1
if deleted == 'yes':
if index == (len(exon_coordiantes)):break
else: index-=1 ###reset this since the number of elements has changed
exon_coordiantes = makeUnique(exon_coordiantes)
if strand == '-': exon_coordiantes.reverse()
#print len(exon_coordiantes),exon_coordiantes,strand;kill
return exon_coordiantes,merged
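### Worked example (hypothetical + strand coordinates): a 5 bp gap (< 9) collapses
### the two flanking exons into one; the 'merged' flag reports that this occurred.
### >>> mergeFragmentedExons([(100, 200), (205, 300), (400, 500)], '+')
### ([(100, 300), (400, 500)], 'yes')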
def importUCSCExonStructureData(species):
start_time = time.time(); global ucsc_annotations; global input_gene_file
###Simple function to import and organize exon/transcript data
filename = 'AltDatabase/ucsc/'+species+'/all_mrna.txt'; input_gene_file = filename; merged_ac_count=0;ac_count=0
fn=filepath(filename); ucsc_gene_coordinates={}; transcript_structure_db={}; ucsc_interim_gene_transcript_db={}
ucsc_annotations={}; ucsc_transcript_coordinates={}; temp_data_db={}; accession_count={}; clusterid_count={}
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
coordinates = t[-1]; coordinates = covertStrToListInt(coordinates)
exon_size_list = t[-3]; exon_size_list = covertStrToListInt(exon_size_list) ##coordinates and are the same length
strand = t[9]; accession = t[10]; clusterid = t[0]; chr = t[14][3:]
if '_' in chr: c = string.split(chr,'_'); chr = c[0]
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
unique_geneid = clusterid,strand,chr ###can have strand specific isoforms
index=0; exon_coordiantes = []
while index<len(coordinates):
exon_start = coordinates[index]; exon_stop = coordinates[index]+exon_size_list[index]
exon_coordiantes.append((exon_start+1,exon_stop))
try: ###Below code is available if we want to join exons that have very small introns (really one exon)... however, EnsemblImport will agglomerate these exons and ignore the splice event (if in the same exon)
if (coordinates[index+1]-exon_stop)<gap_length: null = []#print accession, exon_stop,coordinates;kill
except IndexError: null=[]
index+=1
if strand == '-': exon_coordiantes.reverse()
#print accession,coordinates
exon_coordiantes,merged = mergeFragmentedExons(exon_coordiantes,strand)
merged='no'
if merged=='yes': merged_ac_count+=1
temp_data_db[accession] = unique_geneid,strand,exon_coordiantes,chr
try: accession_count[accession]+=1
except KeyError: accession_count[accession]=1
clusterid_count[unique_geneid] = []
ac_count+=1
print len(clusterid_count), 'clusters imported'
    print merged_ac_count, "transcripts had at least two exons merged into one, out of",ac_count
for accession in temp_data_db:
if accession_count[accession]==1: ###Why would an accession have multiple entries: shouldn't but does
unique_geneid,strand,exon_coordiantes,chr = temp_data_db[accession]
            ###Add the first and last positions in the transcript
ucsc_annotations[unique_geneid] = chr ###don't include strand, since transcripts in a cluster can be on different strands
try: ucsc_gene_coordinates[unique_geneid]+=[exon_coordiantes[0][0]]
except KeyError: ucsc_gene_coordinates[unique_geneid]=[exon_coordiantes[0][0]]
ucsc_gene_coordinates[unique_geneid]+=[exon_coordiantes[-1][1]]
            try: ucsc_transcript_coordinates[accession]+=[exon_coordiantes[0][0]]
except KeyError: ucsc_transcript_coordinates[accession]=[exon_coordiantes[0][0]]
ucsc_transcript_coordinates[accession]+=[exon_coordiantes[-1][1]]
try: ucsc_interim_gene_transcript_db[unique_geneid].append(accession)
except KeyError: ucsc_interim_gene_transcript_db[unique_geneid] = [accession]
transcript_structure_db[accession] = exon_coordiantes
end_time = time.time(); time_diff = int(end_time-start_time)
print filename,"parsed in %d seconds" % time_diff
###Build a gene cluster level database (start and stop coordinates) for UCSC clusters
ucsc_chr_coordinate_db={}
for geneid in ucsc_gene_coordinates:
strand = geneid[1]
a = ucsc_gene_coordinates[geneid]; a.sort()
gene_start = a[0]; gene_stop = a[-1]
chr = ucsc_annotations[geneid]
if chr in ucsc_chr_coordinate_db:
ucsc_gene_coordinates2 = ucsc_chr_coordinate_db[chr]
ucsc_gene_coordinates2[(gene_start,gene_stop)] = geneid,strand
else:
ucsc_gene_coordinates2={}; ucsc_gene_coordinates2[(gene_start,gene_stop)] = geneid,strand
ucsc_chr_coordinate_db[chr] = ucsc_gene_coordinates2
return ucsc_chr_coordinate_db,transcript_structure_db,ucsc_interim_gene_transcript_db,ucsc_transcript_coordinates
def getChromosomalOveralap(ucsc_chr_db,ensembl_chr_db):
print len(ucsc_chr_db),len(ensembl_chr_db); start_time = time.time()
"""Find transcript_clusters that have overlapping start positions with Ensembl gene start and end (based on first and last exons)"""
###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
y = 0; l =0; ensembl_transcript_clusters={}; no_match_list=[]
###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)
for chr in ucsc_chr_db:
ucsc_db = ucsc_chr_db[chr]
try:
for (bp1,ep1) in ucsc_db:
x = 0
gene_clusterid,ucsc_strand = ucsc_db[(bp1,ep1)]
try:
ensembl_db = ensembl_chr_db[chr]
for (bp2,ep2) in ensembl_db:
y += 1; ensembl,ens_strand = ensembl_db[(bp2,ep2)]
if ucsc_strand == ens_strand:
###if the two gene location ranges overlapping
##########FORCE UCSC mRNA TO EXIST WITHIN THE SPACE OF ENSEMBL TO PREVENT TRANSCRIPT CLUSTER EXCLUSION IN ExonArrayEnsemblRules
add = 0
if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
if add == 1:
#if ((bp1 >= (bp2-bp_offset)) and ((ep2+bp_offset) >= ep1)): a = ''
#else: print gene_clusterid,ensembl,bp1,bp2,ep1,ep2;kill
x = 1
try: ensembl_transcript_clusters[gene_clusterid].append(ensembl)
except KeyError: ensembl_transcript_clusters[gene_clusterid] = [ensembl]
l += 1
except KeyError: null=[]; #print chr, 'not found'
if x == 0: no_match_list.append(gene_clusterid)
except ValueError:
for y in ucsc_db: print y;kill
end_time = time.time(); time_diff = int(end_time-start_time)
print "UCSC genes matched up to Ensembl in %d seconds" % time_diff
print "UCSC Transcript Clusters (or accession numbers) overlapping with Ensembl:",len(ensembl_transcript_clusters)
print "With NO overlapp",len(no_match_list)
return ensembl_transcript_clusters,no_match_list
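### The overlap test above treats two same-strand intervals as overlapping when either
### boundary of one falls inside the other. Sketch (hypothetical coordinates): for UCSC
### (bp1,ep1)=(150,450) vs Ensembl (bp2,ep2)=(100,400), bp1 >= bp2 and ep2 >= bp1,
### so add == 1 and the transcript cluster is assigned to that Ensembl gene.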
def getChromosomalOveralapSpecific(ucsc_db,ensembl_db):
print len(ucsc_db),len(ensembl_db); start_time = time.time()
"""Find transcript_clusters that have overlapping start positions with Ensembl gene start and end (based on first and last exons)"""
###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
y = 0; l =0; ensembl_transcript_clusters={}; no_match_list=[]
###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)
for key in ucsc_db:
(chr,bp1,ep1,accession) = key
x = 0
ucsc_chr,ucsc_strand,ensembls = ucsc_db[key]
status = 'go'
for ensembl in ensembls:
y += 1; bp2,ep2 = ensembl_db[ensembl]
add = 0
if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
if add == 1:
#if ((bp1 >= bp2) and (ep2 >= ep1)):
x = 1
try: ensembl_transcript_clusters[accession].append(ensembl)
except KeyError: ensembl_transcript_clusters[accession] = [ensembl]
l += 1; status = 'break'
if x == 0: no_match_list.append(accession)
end_time = time.time(); time_diff = int(end_time-start_time)
print "UCSC genes matched up to Ensembl in %d seconds" % time_diff
print "UCSC mRNA accession numbers overlapping with Ensembl:",len(ensembl_transcript_clusters)
print "With NO overlapp",len(no_match_list)
return ensembl_transcript_clusters,no_match_list
def customDeepCopy(db):
db2={}
for i in db:
for e in db[i]:
try: db2[i].append(e)
except KeyError: db2[i]=[e]
return db2
def identifyNewExonsForAnalysis(ensembl_transcript_clusters,no_match_list,transcript_structure_db,ucsc_interim_gene_transcript_db,ucsc_transcript_coordinates):
###There's currently a lot of data here we are not using: Ensembl exons and transcript data
###Currently deemed as "over-kill" to use this.
    all_accession_gene_associations = {} ###record this for export: needed to align all transcripts to genes for probeset sequence alignment
ucsc_multiple_alignments = {}; ensembl_gene_accession_structures={}
for clusterid in ensembl_transcript_clusters:
ens_geneids = ensembl_transcript_clusters[clusterid]
ucsc_transcripts = ucsc_interim_gene_transcript_db[clusterid]
for accession in ucsc_transcripts:
try: all_accession_gene_associations[accession] += ens_geneids
except KeyError: all_accession_gene_associations[accession] = ens_geneids
if len(ens_geneids)>1: ###If a cluster ID associates with multiple Ensembl IDs
chr = ucsc_annotations[clusterid]
strand = clusterid[1]
for accession in ucsc_transcripts:
if test == 'yes': print "Multiple Ensembls for",accession
a = ucsc_transcript_coordinates[accession]; a.sort(); trans_start = a[0];trans_stop = a[-1]
ucsc_multiple_alignments[(chr,trans_start,trans_stop,accession)] = chr,strand,ens_geneids
#if (chr,trans_start,trans_stop) == ('8', 113499990, 113521567): print accession,chr,strand,ens_geneids
#if accession == 'AK049467': print 'A',ens_geneids,(chr,trans_start,trans_stop);kill
else:
for ens_geneid in ens_geneids:
transcripts = ucsc_interim_gene_transcript_db[clusterid]
for accession in transcripts:
if test == 'yes': print "One Ensembl for",accession
exon_structure_list = transcript_structure_db[accession]
try: ensembl_gene_accession_structures[ens_geneid].append((accession,exon_structure_list))
except KeyError: ensembl_gene_accession_structures[ens_geneid]= [(accession,exon_structure_list)]
###Create a new database for ensembl gene boundaries indexed by ensembl id for quicker reference in the faster lookup chr overlap function
ensembl_gene_coordinates2={}
for chr in ensembl_chr_coordinate_db:
ensembl_gene_coordinates_data = ensembl_chr_coordinate_db[chr]
for (bp,ep) in ensembl_gene_coordinates_data:
ensembl,strand = ensembl_gene_coordinates_data[(bp,ep)]
ensembl_gene_coordinates2[ensembl] = (bp,ep)
###Add all Accession #'s, for which the cluster ID did not correspond to a gene and re-run the chr overlap function
accession_chr_coordinate_db={}
for clusterid in no_match_list:
ucsc_transcripts = ucsc_interim_gene_transcript_db[clusterid]
chr = ucsc_annotations[clusterid]
strand = clusterid[1]
for accession in ucsc_transcripts:
a = ucsc_transcript_coordinates[accession]; a.sort(); trans_start = a[0];trans_stop = a[-1]
if chr in accession_chr_coordinate_db:
accession_gene_coordinate_db = accession_chr_coordinate_db[chr]
accession_gene_coordinate_db[(trans_start,trans_stop)] = accession,strand
else:
accession_gene_coordinate_db={}
accession_gene_coordinate_db[(trans_start,trans_stop)] = accession,strand
accession_chr_coordinate_db[chr] = accession_gene_coordinate_db
###Re-run this query with the accession numbers rather than the cluster number (may take a while)
new_ensembl_transcript_clusters,new_no_match_list = getChromosomalOveralap(accession_chr_coordinate_db,ensembl_chr_coordinate_db)
###Add the single gene accession associations to ensembl_gene_accession_structures
for accession in new_ensembl_transcript_clusters:
ens_geneids = new_ensembl_transcript_clusters[accession]
try: all_accession_gene_associations[accession] += ens_geneids
except KeyError: all_accession_gene_associations[accession] = ens_geneids
if len(ens_geneids)>1 and export_all_associations=='no': ###If a cluster ID associates with multiple Ensembl IDs
null = []###don't do anything with these
else:
            for ens_geneid in ens_geneids:
                ens_geneid = ens_geneids[0] ### note: this overrides the loop variable, so only the first gene is used on every pass
exon_structure_list = transcript_structure_db[accession]
try: ensembl_gene_accession_structures[ens_geneid].append((accession,exon_structure_list))
except KeyError: ensembl_gene_accession_structures[ens_geneid]= [(accession,exon_structure_list)]
###Re-run the chromosomal overlap analysis specifically on transcripts, where the gene overlapped with multiple ensembls
ensembl_transcript_clusters2,no_match_list2 = getChromosomalOveralapSpecific(ucsc_multiple_alignments,ensembl_gene_coordinates2)
for accession in ensembl_transcript_clusters2:
ens_geneids = ensembl_transcript_clusters2[accession]
#if accession == 'AK049467': print ens_geneids;kill
all_accession_gene_associations[accession] = ens_geneids ###Write over existing if a more specific set of gene associations found
if len(ens_geneids)==1 or export_all_associations=='yes': ###Otherwise there are multiple associations
for ens_geneid in ens_geneids:
exon_structure_list = transcript_structure_db[accession]
                    ###This is the list of Ensembl genes to GenBank accessions and exon coordinates
try: ensembl_gene_accession_structures[ens_geneid].append((accession,exon_structure_list))
except KeyError: ensembl_gene_accession_structures[ens_geneid]= [(accession,exon_structure_list)]
    ###Verify accession to gene associations for multiple associations or pick the one proper gene call among several incorrect
"""A problem is that if an Ensembl pseudo-transcript (not a real gene), with no exons overlapping with UCSC transcript exists, the accession
could not be annotated with a gene, but this is not ideal, since the exons in the transcript may just overlap with one gene"""
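    ###Disambiguation strategy (sketch): for each accession, count exact exon-coordinate matches per
    ###candidate Ensembl gene below; an accession matching exons of exactly one gene is assigned to that
    ###gene, while accessions matching exons of zero or multiple genes are removed from the main database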
all_accession_gene_associations2 = []; number_of_associated_exons={}; removed=[]; ensembl_gene_accession_structures_deleted={}; exon_annotation_del_db={}
for accession in all_accession_gene_associations:
exon_structure = transcript_structure_db[accession] ###coordinates for 'exons' provided by UCSC
unique_genes = unique.unique(all_accession_gene_associations[accession])
ensembl_gene_exons_temp={}
for gene in unique_genes:
chr,strand = ensembl_annotations[gene]
for exon_coordinates in exon_structure:
exons = ensembl_gene_exon_db[gene] ###Ensembl exonids for this gene
                new_exon_coordinates = chr,exon_coordinates[0],exon_coordinates[1] ###create an exon coordinate tuple analogous to the one created for Ensembl
if new_exon_coordinates in ensembl_exon_data: ###Therefore this is an Ensembl aligning exon (same start and stop)
ensembl_exon = ensembl_exon_data[new_exon_coordinates] ###Get the id for this exon
if ensembl_exon in exons: ###Since this exon could be in any gene, check to make sure it's specific for just this one
try: ensembl_gene_exons_temp[gene].append(ensembl_exon)
except KeyError: ensembl_gene_exons_temp[gene] = [ensembl_exon]
#if accession == 'X97298': print accession, unique_genes, ensembl_gene_exons_temp, len(ensembl_gene_exons_temp);kill
#if strand == '+': print accession, unique_genes, ensembl_gene_exons_temp, len(ensembl_gene_exons_temp);kill
if len(ensembl_gene_exons_temp) == 1: ###Therefore, only one Ensembl gene contained overlapping exons
for gene in ensembl_gene_exons_temp:
all_accession_gene_associations[accession] = [gene]
number_of_associated_exons[gene,accession] = len(ensembl_gene_exons_temp[gene])
                if len(unique_genes)>1: ###If multiple genes, then this accession number has not been updated in our main accession-to-Ensembl database
exon_structure_list = transcript_structure_db[accession]
try: ensembl_gene_accession_structures[gene].append((accession,exon_structure_list))
except KeyError: ensembl_gene_accession_structures[gene]= [(accession,exon_structure_list)]
        elif len(ensembl_gene_exons_temp) == 0 or len(ensembl_gene_exons_temp) > 1:
            ###Therefore, no Ensembl exon overlaps with the transcript, or exons overlap with several genes. If present in the main accession-to-Ensembl database, delete it
for gene in unique_genes:
if gene in ensembl_gene_accession_structures and export_all_associations=='no':
accession_data_list = ensembl_gene_accession_structures[gene]
                    for accession_data in accession_data_list[:]: ### iterate over a copy, since entries are removed below
                        if accession in accession_data:
                            accession_data_list.remove(accession_data); removed.append(accession)
### add all of the gene accession info to a new db to look for overlap with UCSC annotated alt events
                            if len(ensembl_gene_exons_temp) == 0 and len(unique_genes) == 1: ### This occurs if a transcript has no overlapping Ensembl exons, but does contain an annotated event according to UCSC
all_accession_gene_associations[accession] = [gene] ###although the entry is deleted, probably no issue with exporting the data to LinkEST
                                if len(unique_genes)==1: ###Only update the deleted-structures db when a single gene is associated
exon_structure_list = transcript_structure_db[accession]
try: ensembl_gene_accession_structures_deleted[gene].append((accession,exon_structure_list))
except KeyError: ensembl_gene_accession_structures_deleted[gene]= [(accession,exon_structure_list)]
chr,strand = ensembl_annotations[gene]
exon_annotation_del_db[(gene,chr,strand)] = exon_structure_list ###This should mimic the Ensembl database used for alignToKnownAlt
    ###Check to see if any of the unique accession-gene associations that didn't have an Ensembl exon overlap with a known UCSC alt-event
    ###First update gene coordinates with approved UCSC mRNAs (Ensembl fraks up sometimes and actually makes mRNAs too short)
for gene in ensembl_gene_accession_structures:
for (accession,exon_structure_list) in ensembl_gene_accession_structures[gene]:
for exon_info in exon_structure_list:
exon_start = exon_info[0]; exon_stop = exon_info[1]
ensembl_gene_coordinates[gene].append(exon_start); ensembl_gene_coordinates[gene].append(exon_stop)
try: ucsc_splicing_annot_db = alignToKnownAlt.importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_del_db)
except Exception: ucsc_splicing_annot_db={} # ucsc_splicing_annot_db[ensembl].append((start,stop,annotation_str))
for gene in ensembl_gene_accession_structures_deleted:
if gene in ucsc_splicing_annot_db:
for (accession,exon_structure_list) in ensembl_gene_accession_structures_deleted[gene]:
add = []
for ucsc_exon_info in ucsc_splicing_annot_db[gene]:
bp1 = ucsc_exon_info[0]; ep1 = ucsc_exon_info[1]; annotation = ucsc_exon_info[2]
for exon_info in exon_structure_list:
bp2 = exon_info[0]; ep2 = exon_info[1]
#if accession == 'BC092441':
if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add.append(annotation) ###if the start or stop of the UCSC Alt is inside the UCSC mRNA exon start and stop
elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add.append(annotation) ###opposite
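                        ###Worked overlap example (illustrative values): for a UCSC alt-event (bp1,ep1)=(100,200)
                        ###and an mRNA exon (bp2,ep2)=(150,300), the test ep1>=bp2 and ep2>=ep1 passes, so the
                        ###annotation is kept; together these two tests are equivalent to bp1<=ep2 and bp2<=ep1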
if len(add)>0:
try: ensembl_gene_accession_structures[gene].append((accession,exon_structure_list))
except KeyError: ensembl_gene_accession_structures[gene]= [(accession,exon_structure_list)]
print len(removed), "accessions removed from analysis"
    ###Export all possible transcript-to-Ensembl annotations
"""This is used by mRNASeqAlign.py for figuring out which UCSC mRNAs can be specifically aligned to which Ensembl genes, based on reciprocal-junctions"""
export_file = string.replace(input_gene_file,'all',species+'_UCSC-accession-to-gene')
fn=filepath(export_file); data = open(fn,'w')
for accession in all_accession_gene_associations:
unique_genes = unique.unique(all_accession_gene_associations[accession])
unique_genes = string.join(unique_genes,'|')
if (unique_genes,accession) in number_of_associated_exons:
number_of_ens_exons = number_of_associated_exons[(unique_genes,accession)]
else: number_of_ens_exons = 'NA'
values = accession +'\t'+ unique_genes +'\t'+ str(number_of_ens_exons)+'\n'
data.write(values)
data.close()
return ensembl_gene_accession_structures
def matchUCSCExonsToEnsembl(ensembl_gene_accession_structures):
global distal_ens_exon_pos_db; distal_ens_exon_pos_db = {}
###Use this database to determine which start and stop exon from Ensembl correspond to UCSC start and stop exons (with different distal positions and same junctions)
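    ###Resulting structure (per the loop below): distal_ens_exon_pos_db[gene] = [((chr,start,stop),ens_exonid),...]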
for transcript in ensembl_transcript_structure_db:
ens_geneid = ens_transcript_gene_db[transcript]
chr,strand = ensembl_annotations[ens_geneid]
"""
        ###Performs the same operation as below but just for peripheral Ensembl exons, not all of them (less conservative)
a = ensembl_transcript_structure_db[transcript]; a.sort()
if strand == '+':
start_pos = a[0]; start_ensembl_exon = ensembl_exon_data[a[0]]
end_pos = a[-1]; stop_ensembl_exon = ensembl_exon_data[a[-1]]
else:
start_pos = a[-1]; start_ensembl_exon = ensembl_exon_data[a[-1]]
end_pos = a[0]; stop_ensembl_exon = ensembl_exon_data[a[0]]
v = [(start_pos,start_ensembl_exon),(end_pos,stop_ensembl_exon)]
try: distal_ens_exon_pos_db[ens_geneid] += v
except KeyError: distal_ens_exon_pos_db[ens_geneid] = v"""
trans_data = ensembl_transcript_structure_db[transcript]; trans_data.sort()
for a in trans_data:
            ###Do this for all exons, since we don't care if an mRNA from UCSC starts in the middle of the 3rd exon
#ensembl_exon_data[(chr,exon_start,exon_end)] = ens_exonid
pos = a; ensembl_exon = ensembl_exon_data[a]
v = [(pos,ensembl_exon)]
try: distal_ens_exon_pos_db[ens_geneid] += v
except KeyError: distal_ens_exon_pos_db[ens_geneid] = v
distal_ens_exon_pos_db = eliminate_redundant_dict_values(distal_ens_exon_pos_db)
###Determine which exons are constitutive, by simply counting their number in each transcript
constitutive_gene_db={}; coordinate_to_ens_exon = {}
for ens_geneid in ensembl_gene_accession_structures:
a = ensembl_gene_accession_structures[ens_geneid]
coordinate_count_db={}
chr,strand = ensembl_annotations[ens_geneid]
for (accession,exon_structure) in a:
index=1
for exon_coordinates in exon_structure:
###check whether the exon is an ensembl exon
exon_coordinates = list(exon_coordinates)
new_exon_coordinates = chr,exon_coordinates[0],exon_coordinates[1]
ensembl_exon = 'null'
#if new_exon_coordinates == ('1', 112109882, 112111042): print index, accession,len(exon_structure);kill
try:
ensembl_exon = ensembl_exon_data[new_exon_coordinates]
#if ensembl_exon == 'ENSE00001473402': print new_exon_coordinates;kill
coordinate_to_ens_exon[new_exon_coordinates] = ensembl_exon
except KeyError:
                    if len(exon_structure)>1: ###Ensures we don't do this for single-exon transcripts, where this would be inappropriate
if index == 1 and strand == '+': #or index == len(exon_structure)
for (pos,exon) in distal_ens_exon_pos_db[ens_geneid]:
cr,start,stop = pos
if exon_coordinates[1] == stop: ensembl_exon = exon; exon_coordinates[0] = start #; print 'a'
elif index == 1 and strand == '-': #or index == len(exon_structure)
for (pos,exon) in distal_ens_exon_pos_db[ens_geneid]:
cr,start,stop = pos
if exon_coordinates[0] == start: ensembl_exon = exon; exon_coordinates[1] = stop #; print 'b'
elif index == len(exon_structure) and strand == '+': #or index == len(exon_structure)
for (pos,exon) in distal_ens_exon_pos_db[ens_geneid]:
cr,start,stop = pos
if exon_coordinates[0] == start: ensembl_exon = exon; exon_coordinates[1] = stop #; print 'c'
elif index == len(exon_structure) and strand == '-': #or index == len(exon_structure)
for (pos,exon) in distal_ens_exon_pos_db[ens_geneid]:
cr,start,stop = pos
if exon_coordinates[1] == stop: ensembl_exon = exon; exon_coordinates[0] = start #; print 'd',exon_coordinates,start,stop,ensembl_exon;kill
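                        ###Example (illustrative coordinates): a '+' strand first exon (1050,1200) whose stop matches
                        ###an Ensembl exon at (1000,1200) is relabeled with that exon's ID and its start widened to
                        ###1000, so mRNAs that merely truncate a distal exon still map to known Ensembl exons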
if ensembl_exon != 'null':
#print accession,ensembl_exon,exon_coordinates;kill
coordinate_to_ens_exon[new_exon_coordinates] = ensembl_exon
index+=1
###count each exon in all transcripts
exon_coordinates = tuple(exon_coordinates)
try: coordinate_count_db[exon_coordinates]+=1
except KeyError: coordinate_count_db[exon_coordinates]=1
count_list=[]
        ###process for identifying putative constitutive IDs. Found cases where it eliminated real constitutive exons (check out SLK and EF139853...other transcripts eliminated due to ncRNA overlap)
"""
print coordinate_count_db,'\n'
###Now do this for Ensembl gene exons
for exon_coordinates in coordinate_gene_count_db[ens_geneid]:
try: coordinate_count_db[tuple(exon_coordinates)]+=1
except KeyError: coordinate_count_db[tuple(exon_coordinates)]=1
print coordinate_count_db;kill
for exon_coordinates in coordinate_count_db:
count = coordinate_count_db[exon_coordinates]
count_list.append((count,exon_coordinates))
count_list.sort(); count_list.reverse(); max_count = count_list[0][0]; constitutive=[]
for (count,coor) in count_list:
if count == max_count: constitutive.append(coor)
constitutive_gene_db[ens_geneid] = constitutive"""
return ensembl_gene_accession_structures,constitutive_gene_db,coordinate_to_ens_exon
def exportNullDatabases(species):
input_gene_file = 'AltDatabase/ucsc/'+species+'/all_mrna.txt'
export_file = string.replace(input_gene_file,'all',species+'_UCSC_transcript_structure')
data = export.ExportFile(export_file)
data.close()
export_file2 = string.replace(input_gene_file,'all',species+'_UCSC_transcript_structure_filtered')
data = export.ExportFile(export_file2)
data.close()
export_file = string.replace(export_file,'mrna','COMPLETE-mrna')
data = export.ExportFile(export_file)
data.close()
def exportExonClusters(ensembl_gene_accession_structures,constitutive_gene_db,coordinate_to_ens_exon,species):
    export_file = string.replace(input_gene_file,'all',species+'_UCSC_transcript_structure'); accessions_excluded=[]; accessions_included=0
if export_all_associations == 'yes': ### Ensures that the file created for EnsemblImport will not be over-written by that for Domain analyses
export_file = string.replace(export_file,'mrna','COMPLETE-mrna')
fn=filepath(export_file); data = open(fn,'w')
title = ['Ensembl Gene ID','Chromosome','Strand','Exon Start (bp)','Exon End (bp)','Custom Exon ID','Constitutive Exon','NCBI Accession']
title = string.join(title,'\t')+'\n'
data.write(title)
for ens_geneid in ensembl_gene_accession_structures:
chr,strand = ensembl_annotations[ens_geneid]
common_values = [ens_geneid,chr,strand]
for (accession,exon_structure) in ensembl_gene_accession_structures[ens_geneid]:
index=1
#constitutive_coordinates = constitutive_gene_db[ens_geneid]
ens_exon_count=[]###See if we should export this transcript (if it contains no unique exons)
for (exon_start,exon_stop) in exon_structure:
try:
exonid = coordinate_to_ens_exon[(chr,exon_start,exon_stop)]
###Verify that the exon corresponds to that gene (some Ensembl exon regions belong to more than one gene)
if exonid in ensembl_gene_exon_db[ens_geneid]: ens_exon_count.append(exonid)
except KeyError: null = []
""" LEGACY - REMOVE TRANSCRIPTS FOR WHICH THERE ARE ONLY ENSEMBL EXONS: This is an issue since we really want to get rid of
transcripts with only Ensembl Junctions (too strict). This option was removed on 11.22.2017 as it removed novel isoforms
with known exons """
#if len(ens_exon_count) != len(exon_structure):
accessions_included+=1
for (exon_start,exon_stop) in exon_structure:
            ###used to try to empirically determine which exons are constitutive... instead, just trust Ensembl
#if (exon_start,exon_stop) in constitutive_coordinates: constitutive_call = '1'
#else: constitutive_call = '0'
try:
exonid = coordinate_to_ens_exon[(chr,exon_start,exon_stop)]
                    if exonid not in ensembl_gene_exon_db[ens_geneid]: ###Perform the same check as above
                        exonid = accession+'-'+str(index) ###custom ID designating the relative exon position in the exon_structure
except KeyError: exonid = accession+'-'+str(index) ###custom ID designating the relative exon position in the exon_structure
if exonid in ensembl_const_exon_db: constitutive_call = ensembl_const_exon_db[exonid]
else: constitutive_call = '0'
values = common_values+[str(exon_start),str(exon_stop),exonid,constitutive_call,accession]
index+=1
values = string.join(values,'\t')+'\n'
data.write(values)
            #else: accessions_excluded.append(accession)
data.close()
if export_all_associations == 'yes': ### Used in mRNASeqAlign.py
        print len(accessions_excluded), "Accession numbers excluded (same as Ensembl transcript), out of",(accessions_included+len(accessions_excluded))
export_file2 = string.replace(input_gene_file,'all',species+'_UCSC-accession-eliminated')
fn=filepath(export_file2); data = open(fn,'w')
        for ac in accessions_excluded: data.write(ac+'\n')
data.close()
print 'data written to:',export_file
def exportSimple(filtered_data,title,input_gene_file):
export_file = string.replace(input_gene_file,'all',species+'_UCSC_transcript_structure_filtered')
fn=filepath(export_file); data = open(fn,'w')
data.write(title)
for index,line in filtered_data: data.write(line)
data.close()
print 'data written to:',export_file
def exportSimpleDB(results_list,title,output_dir):
export_file = string.replace(input_gene_file,'all',species+'_UCSC_transcript_structure_filtered')
fn=filepath(export_file); data = open(fn,'w')
data.write(string.join(title,'\t')+'\n')
for items in results_list: data.write(string.join(items,'\t')+'\n')
data.close()
print 'data written to:',export_file
def filterBuiltAssociations(species):
input_gene_file = 'AltDatabase/ucsc/'+species+'/all_mrna.txt'
filename = string.replace(input_gene_file,'all_',species+'_UCSC_transcript_structure_')
###Re-import the data and filter it to remove junctions that should be present in the Ensembl database (ensembl-ensembl junction)
fn=filepath(filename); x=0
raw_data = {}; global accession_db; accession_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: title = line; x+=1 ###first line
else:
gene, chr, strand, exon_start, exon_end, exonid, constitutive_exon, accession = t
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
try: raw_data[accession].append([exonid,x,line])
except KeyError: raw_data[accession] = [[exonid,x,line]]
#y = EnsemblImport.ExonStructureData(gene,chr,strand,exon_start,exon_end,constitutive_exon,exonid,accession)
x+=1
keep=[]; k=0
###Remove the terminal portions of the transcripts that are comprised ONLY of continuous Ensembl exon-exon pairs
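    ###e.g. (illustrative IDs): for exon IDs [ENSE-a,ENSE-b,AK-3,ENSE-c], the 5' pair (ENSE-a,ENSE-b) is
    ###trimmed when it is a known Ensembl exon pair; the list is then reversed to trim the 3' end likewise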
for accession in raw_data:
#accession = 'AK027479'
index=0; x = raw_data[accession]
while index<(len(x)-1): ###for each exon in the accession
try:
y,n,n = x[index]
s,n,n = x[index+1] ###Next exon in transcript
#elif 'ENS' in y and 'ENS' in s: k+=1 #; print (y,s),accession;kill
if (y,s) in ensembl_exon_pairs or (s,y) in ensembl_exon_pairs:
x = x[index+1:]; index = -1
else: break
except IndexError: break
index+=1
index=0; x.reverse()
while index<(len(x)-1): ### for each exon in the accession
try:
y,n,n = x[index]
s,n,n = x[index+1] ###Next exon in transcript
#elif 'ENS' in y and 'ENS' in s: k+=1#; print (y,s),accession;kill
if (y,s) in ensembl_exon_pairs or (s,y) in ensembl_exon_pairs:
x = x[index+1:]; index = -1
else:
x.reverse()
raw_data[accession] = x; break
except IndexError: break
index+=1
for (exonid,i,line) in raw_data[accession]:
keep.append((i,line))
keep = unique.unique(keep); keep.sort()
print k, "exon pairs that contained two ensembl exons not paired in an Ensembl transcript"
if export_all_associations == 'no': ### Only used by EnsemblImport.py
print 'post filtering',input_gene_file
exportSimple(keep,title,input_gene_file)
def returnConstitutive(species):
""" Note: This function is used only for internal analyses and is NOT utilized for the AltAnalyze build process"""
input_gene_file = 'AltDatabase/ucsc/'+species+'/all_mrna.txt'
filename = string.replace(input_gene_file,'all',species+'_UCSC_transcript_structure')
###Re-import the data and filter it to remove junctions that should be present in the Ensembl database (ensembl-ensembl junction)
    fn=filepath(filename); x=0
constitutive_exon_db={}; gene_exon_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x+=1 ###first line
else:
gene, chr, strand, exon_start, exon_end, exonid, constitutive_exon, accession = t
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
try: gene_exon_db[gene].append(exonid)
except KeyError: gene_exon_db[gene] = [exonid]
constitutive_gene_exon_list=[]
for gene in gene_exon_db:
constitutive_exon_db={}
exons = gene_exon_db[gene]
for exon in exons:
            try: constitutive_exon_db[exon]+=1
            except KeyError: constitutive_exon_db[exon] = 1
count_list=[]; count_db={}
for exonid in constitutive_exon_db:
count = constitutive_exon_db[exonid]
count_list.append((count,exonid))
try: count_db[count].append(exonid)
except KeyError: count_db[count] = [exonid]
count_list.sort();
top_count = count_list[-1][0]; top_exon = count_list[-1][1]
        bottom_count = count_list[0][0]; bottom_exon = count_list[0][1]
if top_count != bottom_count:
for constitutive_exon in count_db[top_count]:
constitutive_gene_exon_list.append((gene,constitutive_exon))
exportSimpleDB(constitutive_gene_exon_list,['Ensembl Gene ID','Constitutive Exon'],output_dir)
def getUCSCAssociations(Species):
global species; species = Species
global ensembl_gene_coordinates
ensembl_chr_coordinate_db = importEnsExonStructureData(species)
ucsc_gene_coordinates,transcript_structure_db,ucsc_interim_gene_transcript_db,ucsc_transcript_coordinates = importUCSCExonStructureData(species)
ensembl_transcript_clusters,no_match_list = getChromosomalOveralap(ucsc_gene_coordinates,ensembl_chr_coordinate_db)
    ensembl_gene_accession_structures = identifyNewExonsForAnalysis(ensembl_transcript_clusters,no_match_list,transcript_structure_db,ucsc_interim_gene_transcript_db,ucsc_transcript_coordinates)
    ensembl_gene_accession_structures,constitutive_gene_db,coordinate_to_ens_exon = matchUCSCExonsToEnsembl(ensembl_gene_accession_structures)
    exportExonClusters(ensembl_gene_accession_structures,constitutive_gene_db,coordinate_to_ens_exon,species)
def downloadFiles(ucsc_file_dir,output_dir):
try:
gz_filepath, status = update.download(ucsc_file_dir,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
except Exception: print ucsc_file_dir,'file not found at http://genome.ucsc.edu.'
def runUCSCEnsemblAssociations(Species,mRNA_Type,export_All_associations,run_from_scratch,force):
global species; species = Species; global mRNA_type; mRNA_type = mRNA_Type
global test; global bp_offset; global gap_length; global test_gene
global export_all_associations
    bp_offset = 100 ###allowed excessive base pairs added to the distal ends of the Ensembl genes
gap_length = 12 ###maximum allowed gap length to qualify for merging exons
test = 'no'
test_gene = ['ENSMUSG00000022194']#,'ENSG00000154889','ENSG00000156026','ENSG00000148584','ENSG00000063176','ENSG00000126860'] #['ENSG00000105968']
counts = update.verifyFile('AltDatabase/ucsc/'+species +'/all_mrna.txt','counts') ### See if the file is already downloaded
if force == 'yes' or counts <9:
### Download mRNA structure file from website
import UI; species_names = UI.getSpeciesInfo()
species_full = species_names[species]
species_full = string.replace(species_full,' ','_')
output_dir = 'AltDatabase/ucsc/'+species + '/'
ucsc_mRNA_dir = update.download_protocol('http://hgdownload.cse.ucsc.edu/goldenPath/currentGenomes/'+species_full+'/database/all_mrna.txt.gz',output_dir,'')
knownAlt_dir = update.download_protocol('http://hgdownload.cse.ucsc.edu/goldenPath/currentGenomes/'+species_full+'/database/knownAlt.txt.gz',output_dir,'')
#knownAlt_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','knownAlt.txt.gz')
#ucsc_mRNA_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','all_mrna.txt.gz')
#downloadFiles(ucsc_mRNA_dir,output_dir); downloadFiles(knownAlt_dir,output_dir)
if run_from_scratch == 'yes':
global ensembl_chr_coordinate_db
export_all_associations = export_All_associations
ensembl_chr_coordinate_db = importEnsExonStructureData(species)
ucsc_chr_coordinate_db,transcript_structure_db,ucsc_interim_gene_transcript_db,ucsc_transcript_coordinates = importUCSCExonStructureData(species)
ensembl_transcript_clusters,no_match_list = getChromosomalOveralap(ucsc_chr_coordinate_db,ensembl_chr_coordinate_db)
ensembl_gene_accession_structures = identifyNewExonsForAnalysis(ensembl_transcript_clusters,no_match_list,transcript_structure_db,ucsc_interim_gene_transcript_db,ucsc_transcript_coordinates)
print 'ensembl_gene_accession_structures',len(ensembl_gene_accession_structures)
ensembl_gene_accession_structures,constitutive_gene_db,coordinate_to_ens_exon = matchUCSCExonsToEnsembl(ensembl_gene_accession_structures)
exportExonClusters(ensembl_gene_accession_structures,constitutive_gene_db,coordinate_to_ens_exon,species)
if export_all_associations == 'no': filterBuiltAssociations(species)
else:
if export_all_associations == 'no':
ensembl_chr_coordinate_db = importEnsExonStructureData(species) ###need this to get the unique Ensembl pairs
filterBuiltAssociations(species)
if __name__ == '__main__':
run_from_scratch = 'yes'; force = 'no'
Species = 'Mm'
species = Species
mRNA_Type = 'est'
mRNA_Type = 'mrna'
#returnConstitutive(species);kill
export_all_associations = 'no' ### YES only for protein prediction analysis
runUCSCEnsemblAssociations(Species,mRNA_Type,export_all_associations,run_from_scratch,force)
#AK049467
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/UCSCImport.py
|
UCSCImport.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from stats_scripts import statistics
import copy
import time
import export; reload(export)
import update
import traceback
################# General File Parsing Functions #################
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".all" or entry[-5:] == ".data" or entry[-3:] == ".fa": dir_list2.append(entry)
return dir_list2
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,str(search_term))
if len(files)<1: print search_term,'not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir, search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term in data_dir: matches.append(data_dir)
return matches
################# General Use Functions #################
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineDBs(db1,db2):
for i in db2:
try: db1[i]+=db2[i]
except KeyError: db1[i]=db2[i]
return db1
def eliminateRedundant(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
################# Sequence Parsing and Comparison #################
def simpleSeqMatchProtocol(probeset_seq_data,mRNA_seq):
""" Since it is possible for a probeset to be analyzed more than once for junction arrays
(see probeset_seq_data junction design), only analyze a probeset once."""
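    ###Matching is an exact substring test: call=1 when the probeset exon sequence (or, for exon arrays,
    ###its first or last 25 bp) occurs verbatim in the mRNA sequence; sequences of 10 bp or less and all
    ###other cases yield call=0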
results=[]; probesets_analyzed = {}
for y in probeset_seq_data:
try: exon_seq = y.ExonSeq(); probeset = y.Probeset()
except AttributeError: probeset = y.Probeset(); print [probeset],'seq missing';kill
"""if probeset == 'G7203664@J946608_RC@j_at':
print exon_seq
print mRNA_seq;kill"""
if probeset not in probesets_analyzed:
if len(exon_seq)>10:
if exon_seq in mRNA_seq: call=1
elif array_type == 'exon':
if exon_seq[:25] in mRNA_seq: call=1
elif exon_seq[-25:] in mRNA_seq: call=1
else: call = 0
else: call = 0
#else: junction_seq = y.JunctionSeq()
results.append((call,probeset))
probesets_analyzed[probeset]=[]
return results
def matchTranscriptExonIDsToJunctionIDs(species,array_type,gene_junction_db):
""" Matches junctionIDs to precomputed transcript-level exonID strings - simpler and more accurate than importEnsemblTranscriptSequence"""
output_file = 'AltDatabase/'+species+'/SequenceData/output/'+array_type+'_coordinte-mRNA_alignments.txt'
dataw = export.ExportFile(output_file)
filename = 'AltDatabase/ensembl/'+species+'/mRNA-ExonIDs.txt'
fn=filepath(filename)
x = 0
all={}
found=[] ### Junctions found within known mRNAs
missing=[] ### Junctions cannot be found within known mRNAs
for line in open(fn,'rU').xreadlines():
data = line.strip()
gene,transcript,protein,exonIDs = string.split(data,'\t')
        exonIDs += '|' ### such that the last exon is properly searchable
if gene in gene_junction_db:
junctions_data = gene_junction_db[gene]
for jd in junctions_data:
all[jd.Probeset()]=[]
junctionIDs = string.split(jd.Probeset()+'|',':')[-1]
junctionIDs = string.replace(junctionIDs,'-','|') ### this is the format of the transcript ExonID string
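                    ### e.g. (illustrative ID): probeset 'ENSG00000000003:E2.1-E3.1' becomes 'E2.1|E3.1|',
                    ### which is searched as a substring of the transcript's exonID string, e.g. 'E1.1|E2.1|E3.1|'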
if x==0: x=1 #; print junctionIDs, exonIDs
if junctionIDs in exonIDs:
dataw.write(string.join([jd.Probeset(),'1',transcript],'\t')+'\n')
found.append(jd.Probeset())
else:
dataw.write(string.join([jd.Probeset(),'0',transcript],'\t')+'\n')
dataw.close()
for junction in all:
if junction not in found:
if junction not in missing:
missing.append(junction)
return missing
def importEnsemblTranscriptSequence(Species,Array_type,probeset_seq_db):
global species; global array_type
species = Species; array_type = Array_type
start_time = time.time()
import_dir = '/AltDatabase/'+species+'/SequenceData' ### Multi-species file
g = GrabFiles(); g.setdirectory(import_dir)
seq_files = g.searchdirectory('cdna.all'); seq_files.sort(); filename = seq_files[-1]
output_file = 'AltDatabase/'+species+'/SequenceData/output/'+array_type+'_Ens-mRNA_alignments.txt'
dataw = export.ExportFile(output_file)
output_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/'+array_type+'_Ens_mRNA_seqmatches.txt'
datar = export.ExportFile(output_file)
print "Begining generic fasta import of",filename
fn=filepath(filename); sequence = ''; x = 0; count = 0; global gene_not_found; gene_not_found=[]; genes_found={}
for line in open(fn,'rU').xreadlines():
exon_start=1; exon_stop=1
try: data, newline= string.split(line,'\n')
except ValueError: continue
try:
if data[0] == '>':
if len(sequence) > 0:
gene_found = 'no'; count+=1
if ensembl_id in probeset_seq_db:
genes_found[ensembl_id]=[]; seq_type = 'full-length'
probeset_seq_data = probeset_seq_db[ensembl_id]; cDNA_seq = sequence[1:]; mRNA_length = len(cDNA_seq)
results = simpleSeqMatchProtocol(probeset_seq_data,cDNA_seq)
for (call,probeset) in results:
dataw.write(string.join([probeset,str(call),transid],'\t')+'\n')
###Save all sequences to the disk rather than store these in memory. Just select the optimal sequences later.
values = [transid,cDNA_seq]
values = string.join(values,'\t')+'\n'; datar.write(values); x+=1
else:
gene_not_found.append(ensembl_id)
t= string.split(data[1:],':'); sequence=''
transid_data = string.split(t[0],' '); transid = transid_data[0]; ensembl_id = t[-1]
if '.' in transid:
transid = string.split(transid,'.')[0] ### versioned IDs will cause matching issues
ind=0
#>ENST00000593546 cdna:known chromosome:GRCh37:HG27_PATCH:26597180:26600278:1 gene:ENSG00000268612 gene_biotype:protein_coding transcript_biotype:protein_coding
for item in t:
if 'gene_biotype' in item:
ensembl_id = string.split(item,' ')[0] ### In the following field
break
elif 'gene' in item and 'gene_' not in item:
ensembl_id = string.split(t[ind+1],' ')[0] ### In the following field
ind+=1
if '.' in ensembl_id:
ensembl_id = string.split(ensembl_id,'.')[0] ### versioned IDs will cause matching issues
"""
if 'gene' in t[-3]:
ensembl_id = string.split(t[-2],' ')[0] ### Case in Zm for plant and probably other cDNA files (different fields here!!!)
elif 'gene' not in t[-2]: ### After Ensembl version 64
for entry in t:
if 'gene_biotype' in entry: ensembl_id = string.split(entry,' ')[0]"""
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
datar.close(); dataw.close()
end_time = time.time(); time_diff = int(end_time-start_time)
gene_not_found = unique.unique(gene_not_found)
print len(genes_found), 'genes associated with reciprocal Ensembl junctions'
    print len(gene_not_found), "genes not found in the reciprocal junction database (should be there unless conflict present - or few alternative genes predicted during junction array design)"
print gene_not_found[0:10],'not found examples'
if len(genes_found) < 10:
        print '\n\nWARNING!!!!! Ensembl appears to have changed the formatting of this file, preventing proper import!!!!!!\n\n'
print "Ensembl transcript sequences analyzed in %d seconds" % time_diff
def importUCSCTranscriptSequences(species,array_type,probeset_seq_db):
start_time = time.time()
if force == 'yes':
### Download mRNA sequence file from website
import UI; species_names = UI.getSpeciesInfo()
species_full = species_names[species]
species_full = string.replace(species_full,' ','_')
ucsc_mRNA_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/bigZips','mrna.fa.gz')
output_dir = 'AltDatabase/'+species+'/SequenceData/'
try:
gz_filepath, status = update.download(ucsc_mRNA_dir,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
except Exception: null=[] ### Occurs when file is not available for this species
filename = 'AltDatabase/'+species+'/SequenceData/mrna.fa'
output_file = 'AltDatabase/'+species+'/SequenceData/output/'+array_type+'_UCSC-mRNA_alignments.txt'
dataw = export.ExportFile(output_file)
output_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/'+array_type+'_UCSC_mRNA_seqmatches.txt'
datar = export.ExportFile(output_file)
ucsc_mrna_to_gene = importUCSCTranscriptAssociations(species)
print "Begining generic fasta import of",filename
#'>gnl|ENS|Mm#S10859962 Mus musculus 12 days embryo spinal ganglion cDNA /gb=AK051143 /gi=26094349 /ens=Mm.1 /len=2289']
#'ATCGTGGTGTGCCCAGCTCTTCCAAGGACTGCTGCGCTTCGGGGCCCAGGTGAGTCCCGC'
fn=filepath(filename); sequence = '|'; ucsc_mRNA_hit_len={}; ucsc_probeset_null_hits={}; k=0
for line in open(fn,'rU').xreadlines():
try: data, newline= string.split(line,'\n')
except ValueError: continue
if len(data)>0:
if data[0] != '#':
try:
if data[0] == '>':
if len(sequence) > 1:
if accession in ucsc_mrna_to_gene:
gene_found = 'no'
for ens_gene in ucsc_mrna_to_gene[accession]:
if ens_gene in probeset_seq_db:
sequence = string.upper(sequence); gene_found = 'yes'
mRNA_seq = sequence[1:]; mRNA_length = len(mRNA_seq)
k+=1; probeset_seq_data = probeset_seq_db[ens_gene]
results = simpleSeqMatchProtocol(probeset_seq_data,mRNA_seq)
for (call,probeset) in results:
dataw.write(string.join([probeset,str(call),accession],'\t')+'\n')
if gene_found == 'yes':
values = [accession,mRNA_seq]; values = string.join(values,'\t')+'\n'
datar.write(values)
values = string.split(data,' '); accession = values[0][1:]
sequence = '|'; continue
except IndexError: null = []
try:
if data[0] != '>': sequence = sequence + data
except IndexError: print kill; continue
datar.close()
end_time = time.time(); time_diff = int(end_time-start_time)
print "UCSC mRNA sequences analyzed in %d seconds" % time_diff
def importUCSCTranscriptAssociations(species):
### Import GenBank ACs that are redundant with Ensembl ACs
filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC-accession-eliminated_mrna.txt'
fn=filepath(filename); remove={}
for line in open(fn,'rU').readlines():
mRNA_ac = cleanUpLine(line)
remove[mRNA_ac] = []
###This function is used to extract out EnsExon to EnsTranscript relationships to find out directly
###which probesets associate with which transcripts and then which proteins
filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC-accession-to-gene_mrna.txt'
fn=filepath(filename); ucsc_mrna_to_gene={}
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
mRNA_ac, ens_genes, num_ens_exons = t
#if mRNA_ac not in accession_index: ###only add mRNAs not examined in UniGene
ens_genes = string.split(ens_genes,'|')
if mRNA_ac not in remove: ucsc_mrna_to_gene[mRNA_ac] = ens_genes
return ucsc_mrna_to_gene
################# Import Exon/Junction Data #################
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self): return self._probeset
def GeneID(self): return self._geneid
def SetExonSeq(self,seq): self._exon_seq = seq
def ExonSeq(self): return string.upper(self._exon_seq)
def SetJunctionSeq(self,seq): self._junction_seq = seq
def JunctionSeq(self): return string.upper(self._junction_seq)
def RecipricolProbesets(self): return self._junction_probesets
def Report(self):
output = self.Probeset() +'|'+ self.ExternalGeneID()
return output
def __repr__(self): return self.Report()
class ExonDataSimple(SplicingAnnotationData):
def __init__(self,probeset_id,ensembl_gene_id):
self._geneid = ensembl_gene_id; self._probeset=probeset_id
class JunctionDataSimple(SplicingAnnotationData):
def __init__(self,probeset_id,array_geneid):
self._probeset=probeset_id; self._external_gene = array_geneid
def importSplicingAnnotationDatabase(filename):
global exon_db; fn=filepath(filename)
print 'importing', filename
exon_db={}; count = 0; x = 0
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
t = string.split(data,'\t'); probeset_id = t[0]; ensembl_gene_id = t[2]
probe_data = ExonDataSimple(probeset_id,ensembl_gene_id)
exon_db[probeset_id] = probe_data
return exon_db
def importAllJunctionSequences(species,array_type):
probeset_annotations_file = "AltDatabase/"+species+"/"+array_type+'/'+species+"_Ensembl_probesets.txt"
if array_type == 'RNASeq':
probeset_annotations_file = "AltDatabase/"+species+"/"+array_type+'/'+species+"_Ensembl_junctions.txt"
junction_db = importSplicingAnnotationDatabase(probeset_annotations_file)
if coordinateBasedMatching and array_type == 'RNASeq':
probeset_seq_db = {}
else:
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-junction-seq.txt'
probeset_seq_db = importCriticalJunctionSeq(filename,species,array_type)
pairwise_probeset_combinations={}; probeset_gene_seq_db={}
for probeset in junction_db:
if probeset in probeset_seq_db:
probeset_seq,junction_seq = probeset_seq_db[probeset]
pd = junction_db[probeset]
pd.SetExonSeq(probeset_seq)
pd.SetJunctionSeq(junction_seq)
try: probeset_gene_seq_db[pd.GeneID()].append(pd)
except KeyError: probeset_gene_seq_db[pd.GeneID()] = [pd]
pairwise_probeset_combinations[probeset,' ']=[]
elif coordinateBasedMatching and array_type == 'RNASeq': ### Coordinate matching as opposed to sequence
pd = junction_db[probeset]
try: probeset_gene_seq_db[pd.GeneID()].append(pd)
except KeyError: probeset_gene_seq_db[pd.GeneID()] = [pd]
pairwise_probeset_combinations[probeset,' ']=[]
print len(probeset_gene_seq_db),"genes with probeset sequence associated"
return probeset_gene_seq_db,pairwise_probeset_combinations
def importJunctionAnnotationDatabaseAndSequence(species,array_type,biotype):
"""This function imports GeneID-Ensembl relationships, junction probeset sequences, and recipricol junction comparisons.
with data stored from this function, we can match probeset sequence to mRNAs and determine which combinations of probesets
can be used as match-match or match-nulls."""
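    ###'match-match' = both reciprocal probesets align to mRNAs (with differing transcript sets);
    ###'match-null' = one probeset aligns and the other does not; see reAnalyzeRNAProbesetMatches below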
array_ens_db={}
if array_type == 'AltMouse':
### Import AffyGene to Ensembl associations (e.g., AltMouse array)
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl_relationships.txt'
update.verifyFile(filename,array_type) ### Will force download if missing
fn=filepath(filename); x = 0
for line in open(fn,'rU').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
array_gene,ens_gene = t
try: array_ens_db[array_gene].append(ens_gene)
except KeyError: array_ens_db[array_gene]=[ens_gene]
print len(array_ens_db), 'Ensembl-AltMouse relationships imported.'
if array_type == 'RNASeq' and coordinateBasedMatching == True:
probeset_seq_db={}
else:
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-junction-seq.txt'
probeset_seq_db = importCriticalJunctionSeq(filename,species,array_type)
    ###Import reciprocal junctions, so we can compare these directly instead of hits to nulls, and combine with sequence data
    ###This short-cuts what we did in two functions in ExonModule with exon-level data
if array_type == 'AltMouse':
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_junction-comparisons.txt'
update.verifyFile(filename,array_type) ### Will force download if missing
elif array_type == 'junction':
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_junction_comps_updated.txt'
elif array_type == 'RNASeq':
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_junction_comps.txt'
fn=filepath(filename); probeset_gene_seq_db={}; added_probesets={}; pairwise_probesets={}; x = 0
for line in open(fn,'rU').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
if (array_type == 'junction' or array_type == 'RNASeq'):
array_gene, critical_exons,excl_junction,incl_junction, probeset2, probeset1, data_source = t
array_ens_db[array_gene]=[array_gene]
elif array_type == 'AltMouse':
array_gene,probeset1,probeset2,critical_exons = t #; critical_exons = string.split(critical_exons,'|')
probesets = [probeset1,probeset2]
pairwise_probesets[probeset1,probeset2] = []
if array_gene in array_ens_db:
ensembl_gene_ids = array_ens_db[array_gene]
for probeset_id in probesets:
if probeset_id in probeset_seq_db:
probeset_seq,junction_seq = probeset_seq_db[probeset_id]
if biotype == 'gene':
for ensembl_gene_id in ensembl_gene_ids:
if probeset_id not in added_probesets:
probe_data = JunctionDataSimple(probeset_id,array_gene)
probe_data.SetExonSeq(probeset_seq)
probe_data.SetJunctionSeq(junction_seq)
try: probeset_gene_seq_db[ensembl_gene_id].append(probe_data)
except KeyError: probeset_gene_seq_db[ensembl_gene_id] = [probe_data]
added_probesets[probeset_id]=[]
elif array_type == 'RNASeq' and coordinateBasedMatching == True: ### Coordinate matching as opposed to sequence
if biotype == 'gene':
for ensembl_gene_id in ensembl_gene_ids:
if probeset_id not in added_probesets:
probe_data = JunctionDataSimple(probeset_id,array_gene)
try: probeset_gene_seq_db[ensembl_gene_id].append(probe_data)
except KeyError: probeset_gene_seq_db[ensembl_gene_id] = [probe_data]
added_probesets[probeset_id]=[]
print len(probeset_gene_seq_db),"genes with probeset sequence associated"
print len(pairwise_probesets), "reciprocal junction pairs imported."
return probeset_gene_seq_db,pairwise_probesets
################# Import Sequence Match Results and Re-Output #################
def importCriticalJunctionSeq(filename,species,array_type):
update.verifyFile(filename,array_type) ### Will force download if missing
fn=filepath(filename); probeset_seq_db={}; x = 0
for line in open(fn,'rU').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
try: probeset,probeset_seq,junction_seq = t
except Exception:
try:
probeset,probeset_seq,junction_seq, null = t
except Exception: print filename,t;kill
if array_type == 'RNASeq':
### Ensure the junction sequence is sufficient for searching
left,right = string.split(probeset_seq,'|')
if len(left)>2 and len(right)>2: null=[]
else: probeset_seq = ''
if len(probeset_seq) < 8: probeset_seq = ''
probeset_seq=string.replace(probeset_seq,'|','')
probeset_seq_db[probeset] = probeset_seq,junction_seq
x+=1
print len(probeset_seq_db),'probesets with associated sequence'
return probeset_seq_db
def reAnalyzeRNAProbesetMatches(align_files,species,array_type,pairwise_probeset_combinations):
"""Import matching and non-matching probesets and export the valid comparisons"""
align_files2=[]
for file in align_files:
if array_type in file: align_files2.append(file)
align_files = align_files2
matching={}; not_matching={}
for filename in align_files:
print 'Reading',filename
start_time = time.time()
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
values = string.replace(line,'\n','')
probeset,call,accession = string.split(values,'\t')
if call == '1':
try: matching[probeset].append(accession)
except KeyError: matching[probeset] = [accession]
else:
try: not_matching[probeset].append(accession)
except KeyError: not_matching[probeset] = [accession]
probeset_matching_pairs={}; matching_in_both=0; match_and_null=0; no_matches=0; no_nulls=0
for (probeset1,probeset2) in pairwise_probeset_combinations:
if probeset1 in matching and probeset2 in matching:
matching[probeset1].sort(); matching[probeset2].sort()
match1 = string.join(matching[probeset1],'|')
match2 = string.join(matching[probeset2],'|')
if match1 != match2:
probeset_matching_pairs[probeset1+'|'+probeset2] = [match1,match2]
"""else:
print probeset1, probeset2, match1, match2;kill1"""
matching_in_both+=1
else:
if probeset1 in matching and probeset1 in not_matching:
match = string.join(matching[probeset1],'|')
null_match = string.join(filterNullMatch(not_matching[probeset1],matching[probeset1]),'|')
probeset_matching_pairs[probeset1] = [match,null_match]
match_and_null+=1
elif probeset2 in matching and probeset2 in not_matching:
match = string.join(matching[probeset2],'|')
null_match = string.join(filterNullMatch(not_matching[probeset2],matching[probeset2]),'|')
probeset_matching_pairs[probeset2] = [match,null_match]
match_and_null+=1
elif probeset1 in matching or probeset2 in matching: no_nulls+=1
else:
no_matches+=1
if no_matches<10: print probeset1,probeset2
    print matching_in_both, "probeset pairs with matching isoforms for both reciprocal probesets."
    print match_and_null, "probeset pairs with a match and a null for the same probeset."
print no_nulls, "probeset pairs with only one match."
print no_matches, "probeset pairs with no matches."
from build_scripts import IdentifyAltIsoforms
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_all-transcript-matches.txt'
if analysis_type == 'single':
export_file = 'AltDatabase/'+species+'/'+array_type+'/junction/'+species+'_all-transcript-matches.txt'
IdentifyAltIsoforms.exportSimple(probeset_matching_pairs,export_file,'')
################# Main Run Options #################
def filterNullMatch(null_match,match):
### The null matching transcripts can be many and cause processing issues. Thus, first remove all non-Ensembls
slim_null_match=[]
if len(null_match)>20:
for transcript in null_match:
if transcript not in match: slim_null_match.append(transcript)
if len(slim_null_match)<20 and len(slim_null_match)>0:
null_match = slim_null_match
elif len(slim_null_match)>0:
null_match = slim_null_match; slim_null_match=[]
for transcript in null_match:
if 'ENS' in transcript:slim_null_match.append(transcript)
if len(slim_null_match)>0: null_match = slim_null_match
else: null_match = null_match[:19]
    else: null_match = null_match[:19] ### Not ideal, but necessary to prevent bloating
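    ### Trimming tiers (sketch): (1) drop null transcripts that also matched; (2) if many still remain,
    ### keep only Ensembl ('ENS') IDs; (3) otherwise fall back to truncating the list to 19 entries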
return null_match
def alignProbesetsToTranscripts(species,array_type,Analysis_type,Force, CoordinateBasedMatching = False):
global force; force = Force; global analysis_type; analysis_type = Analysis_type
global coordinateBasedMatching; coordinateBasedMatching = CoordinateBasedMatching
"""Match exon or junction probeset sequences to Ensembl and USCS mRNA transcripts"""
if array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq':
data_type = 'junctions'; probeset_seq_file=''; biotype = 'gene'
if data_type == 'junctions' and analysis_type == 'reciprocal':
start_time = time.time() ### Indicates whether to store information at the level of genes or probesets
probeset_seq_db,pairwise_probeset_combinations = importJunctionAnnotationDatabaseAndSequence(species,array_type,biotype)
end_time = time.time(); time_diff = int(end_time-start_time)
elif analysis_type == 'single':
start_time = time.time()
probeset_seq_db,pairwise_probeset_combinations = importAllJunctionSequences(species,array_type)
end_time = time.time(); time_diff = int(end_time-start_time)
print "Analyses finished in %d seconds" % time_diff
elif array_type == 'exon':
data_type = 'exon'
probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
###Import probe-level associations
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file)
start_time = time.time()
probeset_seq_db = importProbesetSequences(exon_db,species)
end_time = time.time(); time_diff = int(end_time-start_time)
print "Analyses finished in %d seconds" % time_diff
    ### Match probesets to mRNAs
from build_scripts import EnsemblImport
if coordinateBasedMatching == True and array_type == 'RNASeq':
EnsemblImport.exportTranscriptExonIDAssociations(species)
matchTranscriptExonIDsToJunctionIDs(species,array_type,probeset_seq_db) ### no sequences in probeset_seq_db, just junctionIDs
else:
#matchTranscriptExonIDsToJunctionIDs(species,array_type,probeset_seq_db) ### no sequences in probeset_seq_db, just junctionIDs
importEnsemblTranscriptSequence(species,array_type,probeset_seq_db)
try:
importUCSCTranscriptSequences(species,array_type,probeset_seq_db)
except Exception:
print traceback.format_exc()
            pass ### If the species is not supported by UCSC, the UCSC file is not written, but the other mRNA_alignments files should be available
probeset_seq_db={} ### Re-set db
### Import results if junction array to make comparisons valid for junction-pairs rather than a single probeset
if data_type == 'junctions':
### Re-import matches from above and export matching and non-matching transcripts for each probeset to a new file
import_dir = '/AltDatabase/'+species+'/SequenceData/output'
g = GrabFiles(); g.setdirectory(import_dir)
align_files = g.searchdirectory('mRNA_alignments')
reAnalyzeRNAProbesetMatches(align_files,species,array_type,pairwise_probeset_combinations)
if __name__ == '__main__':
a = 'AltMouse'; b = 'exon'; array_type = 'RNASeq'; force = 'no'
h = 'Hs'; m = 'Mm'; species = h; analysis_type = 'reciprocal'; analysis_type = 'single'
alignProbesetsToTranscripts(species,array_type,analysis_type,force,CoordinateBasedMatching = True)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/mRNASeqAlign.py
|
mRNASeqAlign.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from build_scripts import ExonAnalyze_module
import copy
import export
import update
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if (entry[-4:] == ".txt"or entry[-4:] == ".tab" or entry[-4:] == ".csv" or '.fa' in entry) and '.gz' not in entry: dir_list2.append(entry)
return dir_list2
class GrabFiles:
def setdirectory(self,value):
self.data = value
def display(self):
print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
file_dir,file = getDirectoryFiles(self.data,str(search_term))
if len(file)<1: print search_term,'not found',self.data
return file_dir
def getDirectoryFiles(import_dir, search_term):
exact_file = ''; exact_file_dir=''
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
dir_list.sort() ### Get the latest files
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = import_dir[1:]+'/'+data
if search_term in affy_data_dir: exact_file_dir = affy_data_dir; exact_file = data
return exact_file_dir,exact_file
########## End generic file import ##########
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
class ProteinFunctionalSeqData:
def __init__(self, protein_accession,primary_annotation, secondary_annotation, ft_start_pos, ft_end_pos, ft_seq):
self._protein_accession = protein_accession; self._primary_annotation = primary_annotation
self._secondary_annotation = secondary_annotation; self._ft_start_pos = ft_start_pos
self._ft_end_pos = ft_end_pos; self._ft_seq = ft_seq
def ProteinID(self): return self._protein_accession
def PrimaryAnnot(self): return self._primary_annotation
def SecondaryAnnot(self): return self._secondary_annotation
def CombinedAnnot(self): return self.PrimaryAnnot()+'-'+self.SecondaryAnnot()
def addGenomicCoordinates(self,gstart,gstop):
self.genomic_start = str(gstart)
self.genomic_stop = str(gstop) ### keep as string for output
def GenomicStart(self): return self.genomic_start
def GenomicStop(self): return self.genomic_stop
def DomainStart(self): return int(self._ft_start_pos)
def DomainEnd(self): return int(self._ft_end_pos)
def DomainSeq(self): return self._ft_seq
def DomainLen(self):
domain_len = self.DomainEnd()-self.DomainStart()
return domain_len
def SummaryValues(self):
output = self.PrimaryAnnot()+'|'+self.SecondaryAnnot()
return output
def __repr__(self): return self.SummaryValues()
class FullProteinSeqData:
def __init__(self, primary_id, secondary_ids, sequence, type):
self._primary_id = primary_id; self._secondary_ids = secondary_ids
self._sequence = sequence; self._type = type
def PrimaryID(self): return self._primary_id
def SecondaryID(self): return self._secondary_ids
def Sequence(self): return self._sequence
def SequenceLength(self): return len(self._sequence)
def AccessionType(self): return self._type
def SummaryValues(self):
output = self.PrimaryID()+'|'+self.SecondaryID()+'|'+self.AccessionType()
return output
def __repr__(self): return self.SummaryValues()
######Import ArrayID to Protein/Gene Relationships
def import_arrayid_ensembl(filename):
fn=filepath(filename); x = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
gene_id,ensembl_gene_id = string.split(data,'\t')
try: ensembl_arrayid_db[ensembl_gene_id].append(gene_id)
except KeyError: ensembl_arrayid_db[ensembl_gene_id] = [gene_id]
def findDomainsByGenomeCoordinates(species,array_type,Data_type):
### Grab Ensembl relationships from a custom Ensembl Perl script or BioMart
global data_type; data_type = Data_type
protein_relationship_file,protein_feature_file,protein_seq_fasta,protein_coordinate_file = getEnsemblRelationshipDirs(species)
ens_transcript_protein_db = importEnsemblRelationships(protein_relationship_file,'transcript')
ens_protein_gene_db = importEnsemblRelationships(protein_relationship_file,'gene')
exon_protein_db = importEnsExonStructureDataSimple(species,ens_transcript_protein_db)
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
first_last_exon_coord_db = importEnsExonStructureDataCustom(filename,species,{})
### Add UCSC transcript data to ens_transcript_exon_db and ens_gene_transcript_db
try:
filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_COMPLETE-mrna.txt' ### Use the non-filtered database to properly analyze exon composition
first_last_exon_coord_db = importEnsExonStructureDataCustom(filename,species,first_last_exon_coord_db)
except Exception: pass
if array_type == 'exon' or array_type == 'gene' or data_type == 'junction':
ens_probeset_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_probesets.txt"
if array_type == 'RNASeq':
ens_probeset_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_junctions.txt"
elif array_type == 'junction' or array_type == 'AltMouse': ens_probeset_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
elif array_type == 'RNASeq': ens_probeset_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_exons.txt"
probeset_domain_match_db={}; probeset_domain_indirect_match_db={}
protein_probeset_db,gene_probeset_db = importSplicingAnnotationDatabase(ens_probeset_file,exon_protein_db) ### Derived from ExonArrayEnsemblRules
probeset_domain_match_db,probeset_domain_indirect_match_db=matchEnsemblDomainCoordinates(protein_feature_file,species,array_type,protein_probeset_db,ens_protein_gene_db,gene_probeset_db,first_last_exon_coord_db,probeset_domain_match_db,probeset_domain_indirect_match_db)
protein_feature_file = 'AltDatabase/uniprot/'+species+'/'+species+'_FeatureCoordinate.txt'
probeset_domain_match_db,probeset_domain_indirect_match_db=matchEnsemblDomainCoordinates(protein_feature_file,species,array_type,protein_probeset_db,ens_protein_gene_db,gene_probeset_db,first_last_exon_coord_db,probeset_domain_match_db,probeset_domain_indirect_match_db)
exportProbesetDomainMappings(species,array_type,'',probeset_domain_match_db)
exportProbesetDomainMappings(species,array_type,'indirect_',probeset_domain_indirect_match_db)
def matchEnsemblDomainCoordinates(filename,species,array_type,protein_probeset_db,ens_protein_gene_db,gene_probeset_db,first_last_exon_coord_db,probeset_domain_match_db,probeset_domain_indirect_match_db):
fn=filepath(filename); x=0; probeset_domain_indirect_match={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1 ###Don't extract the headers
else:
coor_list=[]
ensembl_prot, aa_start, aa_stop, genomic_start, genomic_stop, name, interpro_id, description = string.split(data,'\t')
coor_list.append(int(genomic_start)); coor_list.append(int(genomic_stop)); coor_list.sort()
genomic_start, genomic_stop = coor_list
if ensembl_prot in protein_probeset_db and len(description)>1:
probeset_data = protein_probeset_db[ensembl_prot]
for sad in probeset_data:
proceed = 'no'
if ((genomic_start <= sad.ProbeStart()) and (genomic_stop >= sad.ProbeStart())) and ((genomic_stop >= sad.ProbeStop()) and (genomic_start <= sad.ProbeStop())):
overlap_area = int(abs(sad.ProbeStop()-sad.ProbeStart())/3)
proceed = 'yes'
elif ((genomic_start <= sad.ProbeStart()) and (genomic_stop >= sad.ProbeStart())):
overlap_area = int(abs(genomic_stop - sad.ProbeStart())/3)
proceed = 'yes'
elif ((genomic_stop >= sad.ProbeStop()) and (genomic_start <= sad.ProbeStop())):
overlap_area = int(abs(sad.ProbeStop() - genomic_start)/3)
proceed = 'yes'
if proceed == 'yes':
#if sad.Probeset() == '3217131': print ensembl_prot, sad.ProbeStart(),sad.ProbeStop(),genomic_start,genomic_stop,interpro_id,description;kill
ipd = description+'-'+interpro_id
ipd = string.replace(ipd,'--','-')
try: probeset_domain_match_db[sad.Probeset()].append(ipd)
except KeyError: probeset_domain_match_db[sad.Probeset()] = [ipd]
### Grab all gene-associated probesets that are not in the first or last exon of any transcript (build a db of the first and last exon coordinates of all analyzed transcripts; also exclude UTR exons, or just remove those that have an alt-N, alt-C or UTR annotation)
if ensembl_prot in ens_protein_gene_db:
ens_gene = ens_protein_gene_db[ensembl_prot]
if ens_gene in gene_probeset_db:
probeset_data = gene_probeset_db[ens_gene]
for sad in probeset_data:
if ((genomic_start <= sad.ProbeStart()) and (genomic_stop >= sad.ProbeStart())) or ((genomic_stop >= sad.ProbeStop()) and (genomic_start <= sad.ProbeStop())):
#if sad.Probeset() == '3217131': print ensembl_prot, sad.ProbeStart(),sad.ProbeStop(),genomic_start,genomic_stop,interpro_id,description;kill
ipd = description+'-'+interpro_id
try: probeset_domain_indirect_match[sad.Probeset()].append(ipd)
except KeyError: probeset_domain_indirect_match[sad.Probeset()] = [ipd]
probeset_domain_indirect_match = eliminateRedundant(probeset_domain_indirect_match)
probeset_domain_match_db = eliminateRedundant(probeset_domain_match_db) ###Remove redundant matches
print len(probeset_domain_match_db),'probesets with associated protein domains'
probeset_domain_indirect_match2={}
for probeset in probeset_domain_indirect_match:
if probeset not in probeset_domain_match_db: ### Only have probesets that don't directly map to domains
probeset_domain_indirect_match2[probeset] = probeset_domain_indirect_match[probeset]
probesets_to_exclude={}
for gene in gene_probeset_db:
### Remove probesets from database that overlap with the first or last exon of any associated transcript (not high confidence for domain alignment)
probeset_data = gene_probeset_db[gene]
for sad in probeset_data:
if sad.Probeset() in probeset_domain_indirect_match2:
exon_coordinates = first_last_exon_coord_db[gene]
for exon_coor in exon_coordinates:
exon_coor.sort()
genomic_start,genomic_stop = exon_coor
if ((genomic_start <= sad.ProbeStart()) and (genomic_stop >= sad.ProbeStart())) and ((genomic_stop >= sad.ProbeStop()) and (genomic_start <= sad.ProbeStop())):
probesets_to_exclude[sad.Probeset()] = []
#if sad.Probeset() == '3217131': print gene, sad.ProbeStart(),sad.ProbeStop(),genomic_start,genomic_stop;kill
for probeset in probeset_domain_indirect_match2:
if probeset not in probesets_to_exclude:
probeset_domain_indirect_match_db[probeset] = probeset_domain_indirect_match2[probeset]
print len(probeset_domain_indirect_match2), len(probesets_to_exclude), len(probeset_domain_indirect_match_db)
return probeset_domain_match_db,probeset_domain_indirect_match_db
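### Illustrative sketch (not part of the pipeline): the three overlap tests above
### distinguish a probeset fully contained in a domain, one whose start falls in
### the domain, and one whose stop falls in the domain. The hypothetical helper
### below reproduces that classification for plain integer coordinates.
def _classifyDomainOverlap(domain_start,domain_stop,probe_start,probe_stop):
    start_in = domain_start <= probe_start and domain_stop >= probe_start
    stop_in = domain_stop >= probe_stop and domain_start <= probe_stop
    if start_in and stop_in: return 'probeset-contained'
    elif start_in: return 'start-overlap'  ### overlap spans domain_stop - probe_start
    elif stop_in: return 'stop-overlap'    ### overlap spans probe_stop - domain_start
    else: return 'no-overlap'
### e.g. _classifyDomainOverlap(100,200,150,180) returns 'probeset-contained'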
def importEnsExonStructureDataCustom(filename,species,first_last_exon_coord_db):
fn=filepath(filename); x=0; ens_transcript_exon_db={}; ens_gene_transcript_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
exon_start = int(exon_start); exon_end = int(exon_end)
try: ens_transcript_exon_db[ens_transcriptid].append([exon_start,exon_end])
except KeyError: ens_transcript_exon_db[ens_transcriptid] = [[exon_start,exon_end]]
ens_gene_transcript_db[ens_transcriptid] = gene
for transcript in ens_transcript_exon_db:
gene = ens_gene_transcript_db[transcript]
try: first_last_exon_coord_db[gene].append(ens_transcript_exon_db[transcript][0])
except KeyError: first_last_exon_coord_db[gene] = [ens_transcript_exon_db[transcript][0]]
try: first_last_exon_coord_db[gene].append(ens_transcript_exon_db[transcript][-1])
except KeyError: first_last_exon_coord_db[gene] = [ens_transcript_exon_db[transcript][-1]]
first_last_exon_coord_db = eliminateRedundant(first_last_exon_coord_db)
return first_last_exon_coord_db
def exportProbesetDomainMappings(species,array_type,indirect_mapping,probeset_domain_match_db):
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = "AltDatabase/"+species+"/"+array_type+"/"+data_type+"/"+species+"_Ensembl_"+indirect_mapping+"domain_aligning_probesets.txt"
else:
export_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+indirect_mapping+"domain_aligning_probesets.txt"
data = export.createExportFile(export_file,"AltDatabase/"+species+"/"+array_type)
data.write('Probeset\tInterPro-Description\n')
for probeset in probeset_domain_match_db:
domain_info_list = probeset_domain_match_db[probeset]
for ipd in domain_info_list: data.write(probeset+'\t'+ipd+'\n')
data.close()
print "Direct probeset to domain associations exported to:",export_file
def importSplicingAnnotationDatabase(filename,exon_protein_db):
fn=filepath(filename); x=0; protein_probeset_db={}; gene_probeset_db={}
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
t=string.split(probeset_data,'\t'); probeset=t[0]; exon_id = t[1]; ens_gene=t[2]; probeset_start=t[6]; probeset_stop=t[7]; external_exonid=t[10]; splicing_event=t[-2]; strand = t[5]
if '|' in probeset_start and '|' in probeset_stop:
### This occurs for junction coordinates. We need to make sure we properly grab the extreme coordinates for negative strand exons
ps1 = string.split(probeset_start,'|'); ps2 = string.split(probeset_stop,'|')
if strand == '+':
probeset_start = ps1[0]; probeset_stop = ps2[-1]
else: probeset_start = ps2[0]; probeset_stop = ps1[-1]
sad = SplicingAnnotationData(probeset,probeset_start,probeset_stop)
if 'U' not in exon_id:
#if 'alt-C-term' not in splicing_event and 'alt-N-term' not in splicing_event and 'altPromoter' not in splicing_event:
try: gene_probeset_db[ens_gene].append(sad)
except KeyError: gene_probeset_db[ens_gene] = [sad]
if 'ENS' in external_exonid or 'ENS' not in ens_gene: ### We only need to examine probesets linked to Ensembl transcripts
external_exonid_list = string.split(external_exonid,'|'); ens_exonid_list=[]
for exon in external_exonid_list:
if 'ENS' in exon or 'ENS' not in ens_gene: ens_exonid_list.append(exon)
for ens_exon in ens_exonid_list:
if ens_exon in exon_protein_db:
ens_proteins = exon_protein_db[ens_exon]
for ens_protein_id in ens_proteins:
try: protein_probeset_db[ens_protein_id].append(sad)
except KeyError: protein_probeset_db[ens_protein_id] = [sad]
print len(protein_probeset_db),'Ensembl proteins with associated probesets'
return protein_probeset_db,gene_probeset_db
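### Worked example of the junction-coordinate handling above: a junction probeset
### may report probeset_start='100|500' and probeset_stop='200|600'. On the '+'
### strand the extreme coordinates become start=100 (ps1[0]) and stop=600 (ps2[-1]);
### on the '-' strand, start=200 (ps2[0]) and stop=500 (ps1[-1]).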
class SplicingAnnotationData:
def __init__(self,probeset,start,stop):
self._probeset = probeset; self._start = start; self._stop = stop
def Probeset(self): return self._probeset
def ProbeStart(self): return int(self._start)
def ProbeStop(self): return int(self._stop)
def importEnsExonStructureDataSimple(species,ens_transcript_protein_db):
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
fn=filepath(filename); x=0; exon_protein_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
if ens_transcriptid in ens_transcript_protein_db:
ens_protein_id = ens_transcript_protein_db[ens_transcriptid]
try: exon_protein_db[ens_exonid].append(ens_protein_id)
except KeyError: exon_protein_db[ens_exonid]=[ens_protein_id]
print len(exon_protein_db),'Ensembl exons with associated protein IDs'
return exon_protein_db
def import_ensembl_ft_data(species,filename,ensembl_arrayid_db,array_type):
"""Over the lifetime of this program, the inputs for protein sequences and relationships have changed.
To support multiple versions, this function now routes the data to two different possible functions,
grabbing the same type of data (InterPro relationships and protein sequence) from different sets of files"""
try: ensembl_protein_seq_db,ensembl_ft_db,domain_gene_counts = importCombinedEnsemblFTdata(filename,ensembl_arrayid_db,array_type)
except IOError:
### This is the current version which is supported
protein_relationship_file,protein_feature_file,protein_seq_fasta,protein_coordinate_file = getEnsemblRelationshipDirs(species)
ensembl_protein_seq_db = importEnsemblProtSeqFasta(protein_seq_fasta)
ensembl_protein_gene_db = importEnsemblRelationships(protein_relationship_file,'gene')
ensembl_ft_db, domain_gene_counts = importEnsemblFTdata(protein_feature_file,ensembl_arrayid_db,array_type,ensembl_protein_seq_db,ensembl_protein_gene_db)
return ensembl_protein_seq_db,ensembl_ft_db,domain_gene_counts
def getEnsemblRelationshipDirs(species):
import_dir = '/AltDatabase/ensembl/'+species
m = GrabFiles(); m.setdirectory(import_dir)
protein_relationship_file = m.searchdirectory(species+'_Ensembl_Protein_')
protein_seq_fasta = m.searchdirectory('pep.all')
protein_feature_file = m.searchdirectory(species+'_ProteinFeatures_')
protein_coordinate_file = m.searchdirectory(species+'_ProteinCoordinates_')
return protein_relationship_file,protein_feature_file,protein_seq_fasta,protein_coordinate_file
def remoteEnsemblProtSeqImport(species):
protein_relationship_file,protein_feature_file,protein_seq_fasta,protein_coordinate_file = getEnsemblRelationshipDirs(species)
return importEnsemblProtSeqFasta(protein_seq_fasta)
def importEnsemblProtSeqFasta(filename):
print "Begining generic fasta import of",filename
fn=filepath(filename); ensembl_protein_seq_db={}; sequence = ''
for line in open(fn,'r').xreadlines():
try: data, newline = string.split(line,'\n')
except ValueError: continue
try:
if data[0] == '>':
if len(sequence) > 0:
seq_data = FullProteinSeqData(ensembl_prot,[ensembl_prot],sequence,'EnsProt')
ensembl_protein_seq_db[ensembl_prot] = seq_data
### Parse new line
t= string.split(data[1:],' '); sequence=''
ensembl_prot = t[0]
if '.' in ensembl_prot: ### Version suffixes introduced after Ensembl version 77
ensembl_prot = string.split(ensembl_prot,'.')[0]
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
seq_data = FullProteinSeqData(ensembl_prot,[ensembl_prot],sequence,'EnsProt')
ensembl_protein_seq_db[ensembl_prot] = seq_data
return ensembl_protein_seq_db
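### For reference, a minimal Ensembl peptide FASTA record as handled above
### (assumed format; the parser keeps only the first whitespace-delimited token
### of the header line and strips any trailing '.N' version suffix):
###     >ENSP00000354687.2 pep chromosome:GRCh38:MT:3307:4262:1 ...
###     MPMANLLLLIVPILIAMAFLMLTERKILG
### yields ensembl_protein_seq_db['ENSP00000354687'] holding that sequence.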
def importEnsemblRelationships(filename,type):
fn=filepath(filename); ensembl_protein_gene_db={}; x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1 ###Don't extract the headers
else:
ensembl_gene, ensembl_transcript, ensembl_protein = string.split(data,'\t')
if type == 'gene': ensembl_protein_gene_db[ensembl_protein] = ensembl_gene
if type == 'transcript': ensembl_protein_gene_db[ensembl_transcript] = ensembl_protein
return ensembl_protein_gene_db
def importEnsemblFTdata(filename,ensembl_arrayid_db,array_type,ensembl_protein_seq_db,ensembl_protein_gene_db):
print "Importing:",filename
global arrayid_ensembl_protein_db; arrayid_ensembl_protein_db={}; x=0
missing_prot_seq=[]; found_prot_seq={}
fn=filepath(filename); ensembl_ft_db = {}; ensembl_ft_summary_db = {}# Use the last database for summary statistics
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1 ###Don't extract the headers
else:
ensembl_prot, aa_start, aa_stop, start, stop, name, interpro_id, description = string.split(data,'\t')
if ensembl_prot in ensembl_protein_seq_db:
found_prot_seq[ensembl_prot]=[]
sd = ensembl_protein_seq_db[ensembl_prot]; protein_sequence = sd.Sequence()
ensembl_gene = ensembl_protein_gene_db[ensembl_prot]
ft_start_pos = aa_start; ft_end_pos = aa_stop
"""Below code is ALMOST identical to importCombinedEnsemblFTdata (original code commented out)"""
###If array_type is exon, ensembl is used as the primary gene ID and thus no internal arrayid is needed. Get only Ensembl genes that are currently being analyzed (for over-representation analysis)
if ensembl_gene in ensembl_arrayid_db:
id_list = ensembl_arrayid_db[ensembl_gene]
for gene_id in id_list:
try: arrayid_ensembl_protein_db[gene_id].append(ensembl_prot)
except KeyError: arrayid_ensembl_protein_db[gene_id] = [ensembl_prot]
#for entry in ft_info_list:
"""try: peptide_start_end, gene_start_end, feature_source, interpro_id, description = string.split(entry,' ')
except ValueError: continue
###142-180 3015022-3015156 Pfam IPR002050 Env_polyprotein
ft_start_pos, ft_end_pos = string.split(peptide_start_end,'-')"""
pos1 = int(ft_start_pos); pos2 = int(ft_end_pos)
ft_length = pos2-pos1
if ft_length > 6: pos_1 = pos1; pos_2 = pos2
else:
if ft_length < 3: pos_1 = pos1 - 3; pos_2 = pos2 + 3
else: pos_1 = pos1 - 1; pos_2 = pos2 + 1
sequence_fragment = protein_sequence[pos_1:pos_2] ###We will search for this sequence, so have this expanded if too small (see above code)
if len(description)>1 or len(interpro_id)>1:
#ft_info = ProteinFunctionalSeqData(ensembl_prot,description,interpro_id,pos1,pos2,sequence_fragment)
ft_info = ensembl_prot,description,interpro_id,pos1,pos2,sequence_fragment,int(start),int(stop) ###don't store as an instance yet... wait till we eliminate duplicates
if ensembl_gene in ensembl_arrayid_db: ###If the ensembl gene is connected to microarray identifiers
arrayids = ensembl_arrayid_db[ensembl_gene]
for arrayid in arrayids: ###This file differs in structure to the UniProt data
try: ensembl_ft_db[arrayid].append(ft_info)
except KeyError: ensembl_ft_db[arrayid] = [ft_info]
else:
if ensembl_prot not in missing_prot_seq:
missing_prot_seq.append(ensembl_prot)
if len(missing_prot_seq): ### This never occurred until parsing the Zm plant build - the same protein sequences should be present from Ensembl for the same build
print 'WARNING!!!!!!! Missing protein sequence from protein sequence file for',len(missing_prot_seq),'proteins relative to',len(found_prot_seq),'found.'
print 'missing examples:',missing_prot_seq[:10]
ensembl_ft_db2 = {}
ensembl_ft_db = eliminateRedundant(ensembl_ft_db) ###duplicate InterPro information is typically present
for arrayid in ensembl_ft_db:
for ft_info in ensembl_ft_db[arrayid]:
ensembl_prot,description,interpro_id,pos1,pos2,sequence_fragment,gstart,gstop = ft_info
ft_info2 = ProteinFunctionalSeqData(ensembl_prot,description,interpro_id,pos1,pos2,sequence_fragment)
ft_info2.addGenomicCoordinates(gstart,gstop) ### Add the genomic start and stop of the domain to keep track of where this domain is located
try: ensembl_ft_db2[arrayid].append(ft_info2)
except KeyError: ensembl_ft_db2[arrayid] = [ft_info2]
domain_gene_counts = summarizeEnsDomainData(ensembl_ft_db2)
return ensembl_ft_db2,domain_gene_counts
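### Worked example of the fragment-padding rule used above (a sketch, not
### pipeline code): features spanning few residues are widened so that the
### extracted sequence fragment is long enough to be searched for later.
def _padFeaturePositions(pos1,pos2):
    ft_length = pos2-pos1
    if ft_length > 6: return pos1, pos2        ### long enough - keep as-is
    elif ft_length < 3: return pos1-3, pos2+3  ### very short - widen by 3 residues per side
    else: return pos1-1, pos2+1                ### short - widen by 1 residue per side
### e.g. _padFeaturePositions(10,12) returns (7,15); _padFeaturePositions(10,20) returns (10,20)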
def importCombinedEnsemblFTdata(filename,ensembl_arrayid_db,array_type):
global arrayid_ensembl_protein_db; arrayid_ensembl_protein_db={}
fn=filepath(filename); x = 0; ensembl_ft_db = {}; ensembl_ft_summary_db = {}# Use the last database for summary statistics
ensembl_protein_seq_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 1:
try: ensembl_gene, chr, mgi, uniprot, ensembl_prot, protein_sequence, position_info = string.split(data,'\t')
except ValueError: continue
ft_info_list = string.split(position_info,' | ')
seq_data = FullProteinSeqData(ensembl_prot,[ensembl_prot],protein_sequence,'EnsProt')
ensembl_protein_seq_db[ensembl_prot] = seq_data ###use this db as input for the UniProt exon based ft search below
###If array_type is exon, ensembl is used as the primary gene ID and thus no internal arrayid is needed. Get only Ensembl genes that are currently being analyzed (for over-representation analysis)
if ensembl_gene in ensembl_arrayid_db:
id_list = ensembl_arrayid_db[ensembl_gene]
for gene_id in id_list:
try: arrayid_ensembl_protein_db[gene_id].append(ensembl_prot)
except KeyError: arrayid_ensembl_protein_db[gene_id] = [ensembl_prot]
for entry in ft_info_list:
try: peptide_start_end, gene_start_end, feature_source, interpro_id, description = string.split(entry,' ')
except ValueError: continue
###142-180 3015022-3015156 Pfam IPR002050 Env_polyprotein
ft_start_pos, ft_end_pos = string.split(peptide_start_end,'-')
pos1 = int(ft_start_pos); pos2 = int(ft_end_pos)
ft_length = pos2-pos1
if ft_length > 6: pos_1 = pos1; pos_2 = pos2
else:
if ft_length < 3: pos_1 = pos1 - 3; pos_2 = pos2 + 3
else: pos_1 = pos1 - 1; pos_2 = pos2 + 1
sequence_fragment = protein_sequence[pos_1:pos_2] ###We will search for this sequence, so have this expanded if too small (see above code)
if len(description)>1 or len(interpro_id)>1:
ft_info = ProteinFunctionalSeqData(ensembl_prot,description,interpro_id,pos1,pos2,sequence_fragment)
if ensembl_gene in ensembl_arrayid_db: ###If the ensembl gene is connected to microarray identifiers
arrayids = ensembl_arrayid_db[ensembl_gene]
for arrayid in arrayids: ###This file differs in structure to the UniProt data
try: ensembl_ft_db[arrayid].append(ft_info)
except KeyError: ensembl_ft_db[arrayid] = [ft_info]
else:
if data[0:6] == 'GeneID': x = 1
domain_gene_counts = summarizeEnsDomainData(ensembl_ft_db)
return ensembl_protein_seq_db,ensembl_ft_db,domain_gene_counts
def summarizeEnsDomainData(ensembl_ft_db):
"""This is a function because the data can be extracted from different functions, using different file formats"""
ensembl_ft_db = eliminateRedundant(ensembl_ft_db)
domain_gene_counts = {}; domain_gene_counts2 = {}
###Count the number of domains present in all genes (count a domain only once per gene)
for gene in ensembl_ft_db:
for ft_info in ensembl_ft_db[gene]:
try: domain_gene_counts[ft_info.PrimaryAnnot(),ft_info.SecondaryAnnot()].append(gene)
except KeyError: domain_gene_counts[ft_info.PrimaryAnnot(),ft_info.SecondaryAnnot()] = [gene]
domain_gene_counts = eliminateRedundant(domain_gene_counts)
for (primary,secondary) in domain_gene_counts:
if len(secondary)>0: key = primary+'-'+secondary
else: key = primary
domain_gene_counts2[key] = len(domain_gene_counts[(primary,secondary)])
domain_gene_counts = domain_gene_counts2
print "Number of Ensembl genes, linked to array genes with domain annotations:",len(ensembl_ft_db)
print "Number of Ensembl domains:",len(domain_gene_counts)
return domain_gene_counts
def import_arrayid_uniprot(filename):
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
arrayid, uniprot = string.split(data,'\t')
try: arrayid_uniprot_db[arrayid].append(uniprot)
except KeyError: arrayid_uniprot_db[arrayid] = [uniprot]
try: uniprot_arrayid_db[uniprot].append(arrayid)
except KeyError: uniprot_arrayid_db[uniprot] = [arrayid]
def importUniProtSeqeunces(species,ensembl_arrayid_db,array_type):
filename = 'AltDatabase/uniprot/'+species+'/'+'uniprot_sequence.txt'
fn=filepath(filename); uniprot_protein_seq_db = {}; external_transcript_to_uniprot_protein_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
id=t[0];ac=t[1];ensembls=t[4];seq=t[2];type=t[6];unigenes=t[7];embls=t[9]
ac=string.split(ac,','); ensembls=string.split(ensembls,','); embls=string.split(embls,','); unigenes=string.split(unigenes,',')
y = FullProteinSeqData(id,ac,seq,type)
if type=='swissprot': uniprot_protein_seq_db[id] = y
if array_type != 'AltMouse':
for ensembl in ensembls:
if len(ensembl)>0 and ensembl in ensembl_arrayid_db: ###remove genes not being analyzed now
###This database replaces the empty arrayid_uniprot_db
try: arrayid_uniprot_db[ensembl].append(id)
except KeyError: arrayid_uniprot_db[ensembl] = [id]
try: uniprot_arrayid_db[id].append(ensembl)
except KeyError: uniprot_arrayid_db[id] = [ensembl]
return uniprot_protein_seq_db
######## End - Derive protein predictions for Exon array probesets
class EnsemblProteinPositionData:
def __init__(self, aa_nt_start, aa_nt_stop, genomic_start, genomic_stop):
self.aa_nt_start = aa_nt_start; self.aa_nt_stop = aa_nt_stop
self.genomic_start=genomic_start;self.genomic_stop = genomic_stop
if self.GenomicStartPos()>self.GenomicStopPos(): self.strand = '-'
else: self.strand = '+'
def ResidueStartPos(self): return int(self.aa_nt_start)
def ResidueStopPos(self): return int(self.aa_nt_stop)
def GenomicStartPos(self): return int(self.genomic_start)
def GenomicStopPos(self): return int(self.genomic_stop)
def Strand(self): return self.strand
def importEnsProteinCoordinates(protein_coordinate_file):
fn=filepath(protein_coordinate_file); ens_protein_pos_db={}; x=0
for line in open(fn,'rU').xreadlines():
if x==0: x=1
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
proteinid, exonid, aa_nt_start, aa_nt_stop, genomic_start, genomic_stop = t
ep = EnsemblProteinPositionData(aa_nt_start, aa_nt_stop, genomic_start, genomic_stop)
try: ens_protein_pos_db[proteinid].append(ep) ### For each exon
except Exception: ens_protein_pos_db[proteinid] = [ep]
return ens_protein_pos_db
def importEnsemblUniprot(species):
filename = 'AltDatabase/uniprot/'+species+'/'+species+'_Ensembl-UniProt.txt'
uniprot_ensembl_db={}
fn=filepath(filename)
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ensembl=t[0];uniprot=t[1]
try: uniprot_ensembl_db[uniprot].append(ensembl)
except KeyError: uniprot_ensembl_db[uniprot] = [ensembl]
uniprot_ensembl_db = eliminateRedundant(uniprot_ensembl_db)
print len(uniprot_ensembl_db),"UniProt entries with Ensembl annotations"
return uniprot_ensembl_db
def getGenomicPosition(ac,ens_protein,uniprot_seq_len,pos1,pos2,ep_list):
residue_positions=[]
for ep in ep_list: ### For each exon
pos1_contained = 0; pos2_contained = 0
if pos1 >= ep.ResidueStartPos() and pos1 <= ep.ResidueStopPos(): pos1_contained = 1
if pos2 >= ep.ResidueStartPos() and pos2 <= ep.ResidueStopPos(): pos2_contained = 1
residue_positions.append(ep.ResidueStopPos())
if pos1_contained == 1:
start_offset = pos1 - ep.ResidueStartPos()
if ep.Strand() == '+':
genomic_feature_start = ep.GenomicStartPos()+(start_offset*3)
else:
genomic_feature_start = ep.GenomicStartPos()-(start_offset*3)-1
if ens_protein == 'ENSMUSP00000044603':
print 'start',ep.Strand(), ep.ResidueStartPos(),ep.ResidueStopPos(),pos1_contained,pos2_contained,ep.GenomicStartPos(),ep.GenomicStopPos(),genomic_feature_start,pos1,pos2,start_offset
if pos2_contained == 1:
stop_offset = pos2 - ep.ResidueStartPos()
if ep.Strand() == '+':
genomic_feature_stop = ep.GenomicStartPos()+(stop_offset*3)
else:
genomic_feature_stop = ep.GenomicStartPos()-(stop_offset*3)+2
if ens_protein == 'ENSMUSP00000044603':
print 'stop',ep.Strand(),ep.ResidueStartPos(),ep.ResidueStopPos(),pos1_contained,pos2_contained,ep.GenomicStartPos(),ep.GenomicStopPos(),genomic_feature_stop,pos1,pos2,stop_offset
if uniprot_seq_len == residue_positions[-1]:
try: return genomic_feature_start,genomic_feature_stop,'found' ### both should be found if everything is accurately built beforehand
except Exception: null=[] #print ens_protein,ac,pos1,pos2, uniprot_seq_len, residue_positions[-1],ep.Strand(), ep.ResidueStartPos(), ep.ResidueStopPos(),genomic_feature_stop,genomic_feature_start
else:
#print ens_protein,ac,pos1,pos2, uniprot_seq_len, residue_positions[-1],ep.Strand()
return 0,0,'failed'
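### Worked example of the residue-to-genome arithmetic above (1-based residue
### positions and a codon size of 3 are assumed, as in the code): on the '+'
### strand a residue p falling within an exon maps to
###     GenomicStartPos() + (p - ResidueStartPos())*3
### so an exon with ResidueStartPos()=50 and GenomicStartPos()=1000 places
### residue 60 at 1000 + (60-50)*3 = 1030; on the '-' strand the offset is
### subtracted instead (with the small +/- adjustments applied above).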
def import_uniprot_ft_data(species,protein_coordinate_file,domain_gene_counts,ensembl_arrayid_db,array_type):
""" This function exports genomic coordinates for each UniProt feature ***IF*** valid protein IDs are present in Ensembl-UniProt file (derived from UniProt)"""
uniprot_protein_seq_db = importUniProtSeqeunces(species,ensembl_arrayid_db,array_type)
uniprot_feature_file = 'AltDatabase/uniprot/'+species+'/'+'uniprot_feature_file.txt'
### Import protein coordinate genomic and protein positions
ens_protein_pos_db = importEnsProteinCoordinates(protein_coordinate_file)
### Import UniProt to Ensembl protein accession relationships
uniprot_ensembl_db = importEnsemblUniprot(species)
export_data = export.ExportFile('AltDatabase/uniprot/'+species+'/'+species+'_FeatureCoordinate.txt')
export_data.write('ensembl_prot\taa_start\taa_stop\tgenomic_start\tgenomic_stop\tname\tinterpro_id\tdescription\n')
fn=filepath(uniprot_feature_file); uniprot_ft_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: primary_uniprot_id,ac,ft,pos1,pos2,annotation = t
except ValueError:
try: primary_uniprot_id,ac,ft,pos1,pos2 = t
except ValueError:
###Not sure why, but an extra \t is in at least one description.
primary_uniprot_id = t[0]; ac=t[1]; ft=t[2];pos1=t[3];pos2=t[4];annotation=string.join(t[-2:])
try: pos2,annotation = string.split(pos2,'/')
except ValueError: annotation = ''
annotation = '/' + annotation
try:
annotation = annotation[0:-1]
if '(By similarity)' in annotation: annotation,null = string.split(annotation,'(By similarity)')
if '(Potential)' in annotation: annotation,null = string.split(annotation,'(Potential)')
if 'By similarity' in annotation: annotation,null = string.split(annotation,'By similarity')
if 'Potential' in annotation: annotation,null = string.split(annotation,'Potential')
try:
if ' ' == annotation[-1]: annotation = annotation[0:-1]
except IndexError: annotation = annotation
if '.' in annotation: annotation,null = string.split(annotation,'.')
pos1 = int(pos1); pos2 = int(pos2)
### Compare the position of each protein (where a matched Ensembl protein is known) to the UniProt domain position to identify genomic coordinates for overlap analysis
ac_list = string.split(ac,',') ### Can be a comma separated list
for ac in ac_list[:1]: ### We only select the first representative one, since this should be sufficient (need to determine more rigorously though - 7/15/12)
if ac in uniprot_ensembl_db:
if ft != 'CHAIN' and ft != 'CONFLICT' and ft != 'VARIANT' and ft != 'VARSPLIC' and ft != 'VAR_SEQ' and '>' not in annotation:
ens_protein_list = uniprot_ensembl_db[ac]
for ens_protein in ens_protein_list: ### Can be composed of gene and protein IDs, so just loop through it and see which matches (should only be one match)
if ens_protein in ens_protein_pos_db and primary_uniprot_id in uniprot_protein_seq_db:
uniprot_seq_len = uniprot_protein_seq_db[primary_uniprot_id].SequenceLength()
ep_list = ens_protein_pos_db[ens_protein]
### ep_list is a list of exon coordinates and corresponding protein positions
try: genomic_feature_start,genomic_feature_stop,status = getGenomicPosition(ac,ens_protein,uniprot_seq_len,pos1,pos2,ep_list)
except Exception: status = 'not'
if status == 'found':
new_annotation = ft+'-'+string.replace(annotation,';','')
values = [ens_protein,str(pos1),str(pos2),str(genomic_feature_start),str(genomic_feature_stop),'','UniProt',new_annotation]
export_data.write(string.join(values,'\t')+'\n')
pos1 = pos1-1
ft_length = pos2-pos1
if ft_length > 6: pos_1 = pos1; pos_2 = pos2
else:
if ft_length < 3: pos_1 = pos1 - 3; pos_2 = pos2 + 3
else: pos_1 = pos1 - 1; pos_2 = pos2 + 1
if primary_uniprot_id in uniprot_protein_seq_db:
full_prot_seq = uniprot_protein_seq_db[primary_uniprot_id].Sequence()
sequence_fragment = full_prot_seq[pos_1:pos_2] ###We will search for this sequence, so have this expanded if too small (see above code)
if ft != 'CHAIN' and ft != 'CONFLICT' and ft != 'VARIANT' and ft != 'VARSPLIC' and ft != 'VAR_SEQ' and '>' not in annotation: ###excludes variant, splice variant, SNP and conflict info
ft_info = ProteinFunctionalSeqData(primary_uniprot_id,ft,annotation,pos1,pos2,sequence_fragment)
try:
ft_info.addGenomicCoordinates(genomic_feature_start,genomic_feature_stop) ### Add the genomic start and stop of the domain to keep track of where this domain is located
except Exception:
pass ### See above; this occurs when certain features cannot be matched between the isoform and the domain (shouldn't occur)
###Store the primary ID as the arrayid (gene accession number)
if primary_uniprot_id in uniprot_arrayid_db:
arrayids = uniprot_arrayid_db[primary_uniprot_id]
for arrayid in arrayids:
try: uniprot_ft_db[arrayid].append(ft_info)
except KeyError: uniprot_ft_db[arrayid] = [ft_info]
else:
###Occurs for non-SwissProt ft_data (e.g. TrEMBL)
continue
except ValueError: continue
domain_gene_count_temp={}
for geneid in uniprot_ft_db:
for ft_info in uniprot_ft_db[geneid]:
try: domain_gene_count_temp[ft_info.PrimaryAnnot(),ft_info.SecondaryAnnot()].append(geneid)
except KeyError: domain_gene_count_temp[ft_info.PrimaryAnnot(),ft_info.SecondaryAnnot()] = [geneid]
domain_gene_count_temp = eliminateRedundant(domain_gene_count_temp)
for (primary,secondary) in domain_gene_count_temp:
if len(secondary)>0: key = primary+'-'+secondary
else: key = primary
domain_gene_counts[key] = len(domain_gene_count_temp[(primary,secondary)])
export_data.close()
print "Number of species uniprot entries imported", len(uniprot_protein_seq_db)
print "Number of species feature containing entries imported", len(uniprot_ft_db)
return uniprot_protein_seq_db,uniprot_ft_db,domain_gene_counts
def customDeepCopy(db):
db2={}
for i in db:
try: ###occurs when the contents of the dictionary are an item versus a list
for e in db[i]:
try: db2[i].append(e)
except KeyError: db2[i]=[e]
except TypeError:
db2[i] = db[i]
return db2
def eliminateRedundant(database):
for key in database:
try:
list = makeUnique(database[key])
list.sort()
except Exception: list = unique.unique(database[key])
database[key] = list
return database
def makeUnique(item):
db1={}; list1=[]
for i in item: db1[i]=[]
for i in db1: list1.append(i)
list1.sort()
return list1
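### For illustration: makeUnique([3,1,3,2]) returns [1,2,3], and
### eliminateRedundant({'k':[3,1,3]}) returns {'k':[1,3]}; both simply
### deduplicate and sort the stored lists.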
def grab_exon_level_feature_calls(species,array_type,genes_analyzed):
arrayid_uniprot_file = 'AltDatabase/uniprot/'+species+'/'+'arrayid-uniprot.txt'
arrayid_ensembl_file = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl_relationships.txt'
ensembl_ft_file = 'AltDatabase/ensembl/'+species+'/'+'DomainFile_All.txt'
null,null,null,protein_coordinate_file = getEnsemblRelationshipDirs(species)
global uniprot_arrayid_db; uniprot_arrayid_db = {}; global arrayid_uniprot_db; arrayid_uniprot_db = {}
global ensembl_arrayid_db; ensembl_arrayid_db={}
if array_type == 'AltMouse':
update.verifyFile(arrayid_uniprot_file,array_type) ### Will force download if missing
update.verifyFile(arrayid_ensembl_file,array_type) ### Will force download if missing
import_arrayid_uniprot(arrayid_uniprot_file)
import_arrayid_ensembl(arrayid_ensembl_file)
###Otherwise, these databases can be built on-the-fly in downstream methods, since Ensembl will be used as the array gene id
else: ensembl_arrayid_db = genes_analyzed ###ensembl to ensembl for those being analyzed in the program
ensembl_protein_seq_db,ensembl_ft_db,domain_gene_counts = import_ensembl_ft_data(species,ensembl_ft_file,ensembl_arrayid_db,array_type) ###Import function domain annotations for Ensembl proteins
print 'Ensembl based domain feature genes:',len(ensembl_ft_db),len(domain_gene_counts)
uniprot_protein_seq_db,uniprot_ft_db,domain_gene_counts = import_uniprot_ft_data(species,protein_coordinate_file,domain_gene_counts,ensembl_arrayid_db,array_type) ###" " " " UniProt "
print 'UniProt based domain feature genes:',len(uniprot_ft_db),len(domain_gene_counts)
arrayid_ft_db = combineDatabases(uniprot_ft_db,ensembl_ft_db) ###arrayid relating to classes of functional domain attributes and associated proteins (ensembl and uniprot)
return arrayid_ft_db,domain_gene_counts
def combineDatabases(x,y):
db1 = customDeepCopy(x); db2 = customDeepCopy(y); db3={}
for entry in db1: db3[entry] = db1[entry]
for entry in db2:
if entry in db3: db3[entry]+=db2[entry]
else: db3[entry]=db2[entry]
return db3
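### For illustration: combineDatabases({'a':[1]},{'a':[2],'b':[3]}) returns
### {'a':[1,2],'b':[3]}; list values for keys present in both inputs are concatenated.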
def clearall():
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all: del globals()[var]
def importGeneAnnotations(species):
### Used for internal testing
gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations_simple.txt"
fn=filepath(gene_annotation_file)
count = 0; gene_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if count == 0: count = 1
else:
gene, description, symbol = string.split(data,'\t')
gene_db[gene] = [gene]
return gene_db
if __name__ == '__main__':
species = 'Mm'
array_type = 'RNASeq'
genes_analyzed = importGeneAnnotations(species)
uniprot_arrayid_db={}
grab_exon_level_feature_calls(species,array_type,genes_analyzed); sys.exit()
protein_coordinate_file = '/Users/nsalomonis/Desktop/AltAnalyze/AltDatabase/EnsMart16/ensembl/Zm/Zm_ProteinCoordinates_build_16_69_5.tab'
#import_uniprot_ft_data(species,protein_coordinate_file,[],[],'RNASeq');sys.exit()
species = 'Rn'; array_type = 'AltMouse'
getEnsemblRelationshipDirs(species);sys.exit()
findDomainsByGenomeCoordinates(species,array_type,'null'); sys.exit() ### data_type argument assumed 'null' for this test call
matchEnsemblDomainCoordinates(protein_feature_file,species,array_type,protein_probeset_db,ens_protein_gene_db,gene_probeset_db)
kill
protein_relationship_file,protein_feature_file,protein_seq_fasta,protein_coordinate_file = getEnsemblRelationshipDirs(species)
ens_transcript_protein_db = importEnsemblRelationships(protein_relationship_file,'transcript') ### From Perl script to Ensembl API
ens_protein_gene_db = importEnsemblRelationships(protein_relationship_file,'gene') ### From Perl script to Ensembl API
exon_protein_db = importEnsExonStructureDataSimple(species,ens_transcript_protein_db) ### From BioMart
#kill
if array_type == 'exon': ens_probeset_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_probesets.txt"
else: ens_probeset_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
protein_probeset_db,gene_probeset_db = importSplicingAnnotationDatabase(ens_probeset_file,exon_protein_db) ### Derived from ExonArrayEnsemblRules
#matchEnsemblDomainCoordinates(protein_feature_file,species,array_type,protein_probeset_db,ens_protein_gene_db,gene_probeset_db)
kill
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from stats_scripts import statistics
import update
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def grabJunctionData(species,array_type,key_type,root_dir):
if array_type == 'AltMouse': filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_junction-comparisons.txt'
else:
filename = 'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
if array_type == 'RNASeq': filename = root_dir+filename ### This is a dataset specific file
fn=filepath(filename); critical_exon_junction_db = {}; x=0
for line in open(fn,'rU').xreadlines():
if x==0: x=1
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
if array_type == 'AltMouse':
geneid,probeset1,probeset2,critical_exons = t
probesets = [probeset1, probeset2]; critical_exons = string.split(critical_exons,'|')
else:
geneid,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source = t
probeset1 = incl_junction_probeset; probeset2 = excl_junction_probeset
probesets = [incl_junction_probeset, excl_junction_probeset]; critical_exons = [critical_exon]
for exon in critical_exons:
if key_type == 'gene-exon':
key = geneid+':'+exon
###Record for each probeset what critical junctions it is associated with
try: critical_exon_junction_db[key].append((probeset1, probeset2))
except KeyError: critical_exon_junction_db[key] = [(probeset1, probeset2)]
#print key,(probeset1, probeset2)
else: critical_exon_junction_db[(probeset1, probeset2)] = critical_exons
return critical_exon_junction_db
class GeneAnnotationData:
def __init__(self, geneid, description, symbol, external_geneid, rna_processing_annot):
self._geneid = geneid; self._description = description; self._symbol = symbol
self._rna_processing_annot = rna_processing_annot; self._external_geneid = external_geneid
def GeneID(self): return self._geneid
def Description(self): return self._description
def Symbol(self): return self._symbol
def ExternalGeneID(self): return self._external_geneid
def TranscriptClusterIDs(self):
if self.GeneID() in gene_transcript_cluster_db:
transcript_cluster_list = gene_transcript_cluster_db[self.GeneID()]
return transcript_cluster_list
else:
try: return transcript_cluster_list
except UnboundLocalError: return [''] ### Occurs for SplicingIndex method with AltMouse data
def RNAProcessing(self): return self._rna_processing_annot
def Report(self):
output = self.GeneID() +'|'+ self.Symbol() +'|'+ self.Description()
return output
def __repr__(self): return self.Report()
def import_annotations(filename,array_type,keyBySymbol=False):
fn=filepath(filename); annotate_db = {}; x = 0
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
try: affygene, description, ll_id, symbol, rna_processing_annot = string.split(data,'\t')
except ValueError: affygene, description, ll_id, symbol = string.split(data,'\t'); rna_processing_annot = ''
if '"' in description: null,description,null = string.split(description,'"')
y = GeneAnnotationData(affygene, description, symbol, ll_id, rna_processing_annot)
if keyBySymbol:
annotate_db[symbol] = y
else:
annotate_db[affygene] = y
else:
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
rna_processing_annot=''
try: ensembl, symbol, description, rna_processing_annot = string.split(data,'\t')
except ValueError: ensembl, description, symbol = string.split(data,'\t')
y = GeneAnnotationData(ensembl, description, symbol, ensembl, rna_processing_annot)
annotate_db[ensembl] = y
if keyBySymbol:
annotate_db[symbol] = y
else:
annotate_db[ensembl] = y
return annotate_db
def importmicroRNADataExon(species,array_type,exon_db,microRNA_prediction_method,explicit_data_type,root_dir):
filename = "AltDatabase/"+species+"/"+array_type+"/"+species+"_probeset_microRNAs_"+microRNA_prediction_method+".txt"
if array_type == 'junction': filename = string.replace(filename,'.txt','-filtered.txt')
fn=filepath(filename); microRNA_full_exon_db={}; microRNA_count_db={}; gene_microRNA_denom={}
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
critical_exon_junction_db = grabJunctionData(species,array_type,'gene-exon',root_dir)
if array_type == 'AltMouse':
gene_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+array_type+"_gene_annotations.txt"
annotate_db = import_annotations(gene_annotation_file,array_type)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
probeset=t[0];microRNA=t[1]
try: mir_seq=t[2];mir_sources=t[3]
except IndexError: mir_seq='';mir_sources=''
try:
if array_type == 'exon' or array_type == 'gene' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null'):
ed = exon_db[probeset]; probeset_list = [probeset]; exonid = ed.ExonID(); symbol = ed.Symbol(); geneid = ed.GeneID()
else:
probeset_list = []; uid = probeset ###This is the gene with the critical exon it's mapped to
for junctions in critical_exon_junction_db[uid]:
if junctions in exon_db:
geneid = exon_db[junctions].GeneID()
if array_type == 'AltMouse':
if geneid in annotate_db: symbol = annotate_db[geneid].Symbol()
else: symbol = geneid
else:
try: symbol = exon_db[junctions].Symbol()
except Exception: symbol=''
else: geneid = ''
#if junctions in exon_db:
probeset_list.append(junctions)
if len(geneid)>0:
microRNA_info = microRNA, symbol, mir_seq, mir_sources
for probeset in probeset_list:
try: microRNA_full_exon_db[geneid,probeset].append(microRNA_info)
except KeyError: microRNA_full_exon_db[geneid,probeset] = [microRNA_info]
try: microRNA_count_db[microRNA].append(geneid)
except KeyError: microRNA_count_db[microRNA] = [geneid]
#if 'ENS' in microRNA: print [data,t];kill
gene_microRNA_denom[geneid] = []
except KeyError: null=[]
microRNA_count_db = eliminate_redundant_dict_values(microRNA_count_db)
###Replace the actual genes with the unique gene count per microRNA
for microRNA in microRNA_count_db: microRNA_count_db[microRNA] = len(microRNA_count_db[microRNA])
if array_type == 'RNASeq': id_name = 'junction IDs'
else: id_name = 'array IDs'
print len(gene_microRNA_denom),"genes with a predicted microRNA binding site aligning to a",id_name
return microRNA_full_exon_db,microRNA_count_db,gene_microRNA_denom
def filterMicroRNAProbesetAssociations(microRNA_full_exon_db,exon_hits):
filtered_microRNA_exon_db = {}
microRNA_gene_count_db = {}
for key in microRNA_full_exon_db:
array_geneid = key[0]
if key in exon_hits:
filtered_microRNA_exon_db[key] = microRNA_full_exon_db[key]
for (microRNA,gene_symbol,seq,source) in microRNA_full_exon_db[key]:
gene_info = array_geneid, gene_symbol
try: microRNA_gene_count_db[microRNA].append(gene_info)
except KeyError: microRNA_gene_count_db[microRNA] = [gene_info]
return filtered_microRNA_exon_db
class ExonSequenceData:
def __init__(self, gene_id, exon_id, exon_seq, complete_mRNA_length,strand):
self._gene_id = gene_id; self._exon_id = exon_id; self._strand = strand
self._exon_seq = exon_seq; self._complete_mRNA_length = complete_mRNA_length
def GeneID(self): return self._gene_id
def ExonID(self): return self._exon_id
def ExonSeq(self): return self._exon_seq
def RNALen(self): return int(self._complete_mRNA_length)
def Strand(self): return self._strand
def SummaryValues(self):
output = self.GeneID()+'|'+self.SecondaryID()+'|'+self.ExonID()
return output
def __repr__(self): return self.SummaryValues()
class ExonProteinAlignmentData:
def __init__(self,geneid,probeset,exonid,protein_hit_id,protein_null_id):
self._gene_id = geneid; self._exon_id = exonid; self._probeset = probeset
self._protein_hit_id = protein_hit_id; self._protein_null_id = protein_null_id
def GeneID(self): return self._gene_id
def ExonID(self): return self._exon_id
def Probeset(self): return self._probeset
def HitProteinID(self): return self._protein_hit_id
def NullProteinID(self): return self._protein_null_id
def RecordHitProteinExonIDs(self,hit_exon_ids): self._hit_exon_ids = hit_exon_ids
def RecordNullProteinExonIDs(self,null_exon_ids): self._null_exon_ids = null_exon_ids
def HitProteinExonIDs(self):
exon_list_str = string.join(self._hit_exon_ids,',')
return exon_list_str
def NullProteinExonIDs(self):
exon_list_str = string.join(self._null_exon_ids,',')
return exon_list_str
def SummaryValues(self):
output = self.GeneID()+'|'+self.ExonID()+'|'+self.Probeset()
return output
def __repr__(self): return self.SummaryValues()
def importExonSequenceBuild(filename,exon_db):
###Parse protein sequence
fn=filepath(filename); protein_sequence_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
protein_id,protein_seq = string.split(data,'\t')
protein_sequence_db[protein_id] = protein_seq
###Parse protein/probeset data (built in FeatureAlignment)
filename = string.replace(filename,'SEQUENCE','probeset')
fn=filepath(filename); probeset_protein_db = {}; positive_protein_associations = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t')
try: probeset,protein_hit_id,protein_null_id = t
except ValueError: gene,probeset,exonid,protein_hit_id,protein_null_id = t
try:
ed = exon_db[probeset] ###update this information every time you run (in case a new exon database is used)
ep = ExonProteinAlignmentData(ed.GeneID(),probeset,ed.ExonID(),protein_hit_id,protein_null_id)
probeset_protein_db[probeset] = ep
try: positive_protein_associations[protein_hit_id].append((probeset,ed.ExonID())) ### Record which probesets actually match to which proteins (e.g. informs you which align to which nulls too)
except KeyError: positive_protein_associations[protein_hit_id] = [(probeset,ed.ExonID())]
except KeyError: null=[]
###Determine for each null and hit proteins, what exons are they typically associated with (probably not too informative but record anyways)
for probeset in probeset_protein_db:
ep = probeset_protein_db[probeset]
try:
null_tuple_exonid_list = positive_protein_associations[ep.NullProteinID()]
null_exonid_list = formatExonLists(null_tuple_exonid_list)
except KeyError: null_exonid_list = []
hit_tuple_exonid_list = positive_protein_associations[ep.HitProteinID()]
hit_exonid_list = formatExonLists(hit_tuple_exonid_list)
ep.RecordHitProteinExonIDs(hit_exonid_list)
ep.RecordNullProteinExonIDs(null_exonid_list)
#a = ep.HitProteinExonIDs()
#b = ep.NullProteinExonIDs()
#print ep.HitProteinExonIDs()
#print ep.NullProteinExonIDs();kill
return probeset_protein_db,protein_sequence_db
def formatExonLists(exonid_tuple_lists):
exonid_list=[]; exonid_tuple_lists.sort()
for (probeset,exonid) in exonid_tuple_lists: exonid_list.append(exonid)
return exonid_list
def import_existing_sequence_build(filename):
fn=filepath(filename); transcript_cdna_sequence_dbase = {}; exon_sequence_database = {}; transcript_associations = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
try: array_geneid, altmerge, strand, dna_exon_seq, exon_structure, coding_seq, peptide_length, reference_seq_name,reference_seq, ref_AC, full_length, start_exon, stop_exon, null,null,null,null,refseq_mRNA,null,null,null,null,null,null,null,nmd_call = string.split(data,'\t')
except ValueError: t = string.split(data,'\t');print len(t);kill
if array_geneid == 'array_geneid': title = data
else:
dna_exon_seq = dna_exon_seq[0:-3] #there is a "***" at the end of each transcript sequence
dna_exon_seq = string.split(dna_exon_seq,'('); dna_exon_seq = dna_exon_seq[1:] #get rid of the first blank generated by parsing
### separate the exon number and exon sequence into a tuple and store in order within 'exon_seq_tuple_list'
mRNA_length = 0; exon_seq_tuple_list = []
for exon_seq in dna_exon_seq: #exon_seq : E20)tccccagctttgggtggtgg
exon_num, dna_seq = string.split(exon_seq,')'); mRNA_length += len(dna_seq)
if mRNA_length > 18:
esd = ExonSequenceData(array_geneid,exon_num,dna_seq,mRNA_length,strand)
exon_sequence_database[array_geneid,exon_num] = esd
transcript_data = [int(peptide_length), altmerge,[start_exon, stop_exon], exon_structure, nmd_call, full_length, coding_seq, strand, reference_seq,ref_AC,float(mRNA_length),refseq_mRNA]
try: transcript_cdna_sequence_dbase[array_geneid].append(transcript_data)
except KeyError: transcript_cdna_sequence_dbase[array_geneid] = [transcript_data]
transcript_associations[array_geneid,exon_structure] = altmerge
print "\nNumber of exon sequences imported",len(exon_sequence_database)
print "Number of transcript sequences imported: ",len(transcript_cdna_sequence_dbase)
return transcript_cdna_sequence_dbase, transcript_associations, exon_sequence_database
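### For reference, the dna_exon_seq field parsed above is assumed to look like
### '(E1)atgg...(E2)tccc...***': each exon sequence is prefixed by its exon ID
### in parentheses, with a literal '***' terminator that is stripped first.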
def exportAltMouseExonSequence():
probeset_exon_db={}; x=0
species = 'Mm'; array_type = 'AltMouse'
critical_exon_import_file = 'AltDatabase/Mm/AltMouse/AltMouse_junction-comparisons.txt'
update.verifyFile(critical_exon_import_file,array_type)
critical_exon_db={}; critical_probesets={}
fn=filepath(critical_exon_import_file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
gene,probeset1,probeset2,critical_exons=string.split(data,'\t')
critical_exons= string.split(critical_exons,'|')
for exon in critical_exons:
try: critical_exon_db[gene,exon].append(probeset1+'-'+probeset2)
except KeyError: critical_exon_db[gene,exon] = [probeset1+'-'+probeset2]
critical_probesets[probeset1]=[]; critical_probesets[probeset2]=[]
probeset_annotations_file = "AltDatabase/Mm/AltMouse/MASTER-probeset-transcript.txt"
update.verifyFile(probeset_annotations_file,array_type)
fn=filepath(probeset_annotations_file)
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x==0: x=1
else:
probeset,affygene,exons,transcript_num,transcripts,probe_type_call,ensembl,block_exon_ids,block_structure,comparison_info = string.split(probeset_data,'\t')
if probeset in critical_probesets:
exons = exons[:-1]; exons = string.split(exons,'-')
affygene = affygene[:-1]
if '|' in exons: print exons;kill
probeset_exon_db[probeset,affygene]=exons
exon_protein_sequence_file = "AltDatabase/Mm/AltMouse/SEQUENCE-transcript-dbase.txt"
update.verifyFile(exon_protein_sequence_file,array_type)
transcript_cdna_sequence_dbase,transcript_associations,exon_sequence_database = import_existing_sequence_build(exon_protein_sequence_file)
critical_exon_seq_export = 'AltDatabase/Mm/AltMouse/AltMouse_critical-exon-seq.txt'
update.verifyFile(critical_exon_seq_export,array_type)
fn=filepath(critical_exon_seq_export)
data = open(fn,'w')
title = ['Affygene:exon','critical_exon-num','critical-probeset-comps']; title = string.join(title,'\t')+'\n'; data.write(title)
for (gene,exon_num) in critical_exon_db:
probeset_comp_list = critical_exon_db[(gene,exon_num)]; probeset_comp_list = string.join(probeset_comp_list,'|')
try: ###Restrict export to previously exported critical exons (ExonAnnotate_module)
esd = exon_sequence_database[(gene,exon_num)]
exon_seq = esd.ExonSeq()
exon_data = string.join([gene+':'+exon_num,probeset_comp_list,exon_seq],'\t')+'\n'
data.write(exon_data)
except KeyError: null=[]
data.close()
probeset_seq_file = 'AltDatabase/Mm/AltMouse/probeset_sequence_reversed.txt'
update.verifyFile(probeset_seq_file,array_type)
probeset_seq_db={}; x=0
fn=filepath(probeset_seq_file)
for line in open(fn,'rU').xreadlines():
if x == 0: x=1
else:
data = cleanUpLine(line); t = string.split(data,'\t')
probeset = t[0]
probeset_seq_list = t[1:]
probeset_seq_db[probeset] = probeset_seq_list
critical_junction_seq_export = 'AltDatabase/Mm/AltMouse/AltMouse_critical-junction-seq.txt'
update.verifyFile(critical_junction_seq_export,array_type)
fn=filepath(critical_junction_seq_export)
data = open(fn,'w'); x=0; k=0;l=0
title = ['probeset','probeset-seq','junction-seq']; title = string.join(title,'\t')+'\n'; data.write(title)
for (probeset,gene) in probeset_exon_db:
junction_seq = []; y=0; positions=[]
try:
probeset_seq_list = probeset_seq_db[probeset]
for exon_num in probeset_exon_db[(probeset,gene)]:
try: ###Restrict export to previously exported critical exons (ExonAnnotate_module)
esd = exon_sequence_database[(gene,exon_num)]
exon_seq = esd.ExonSeq(); strand = esd.Strand()
junction_seq.append(exon_seq); y+=1
#exon_data = string.join([gene+':'+exon_num,probeset_comp_list,exon_seq],'\t')+'\n'
#data.write(exon_data)
except KeyError: null=[]
#if 'E5' in probeset_exon_db[(probeset,gene)]:
if y>0:
if strand == '-': junction_seq.reverse()
junction_seq_str = string.join(junction_seq,'')
junction_seq_str = string.upper(junction_seq_str)
not_found = 0
for probeset_seq in probeset_seq_list:
#probeset_seq = reverse_string(probeset_seq)
probeset_seq_rev = reverse_orientation(probeset_seq)
if probeset_seq in junction_seq_str:
f = string.find(junction_seq_str,probeset_seq)
positions.append((f,len(probeset_seq)))
k+=1
else:
not_found = 1
x+=1
if not_found == 1:
new_probeset_seq = probeset_seq_list[0] ###pick the first probe sequence found
if len(positions)>0:
positions.sort()
new_probeset_seq = junction_seq_str[positions[0][0]:positions[-1][0]+positions[-1][1]]
#print new_probeset_seq,positions, probeset,probeset_exon_db[(probeset,gene)],probeset_seq_list,junction_seq;kill
junction_seq = string.join(junction_seq,'|') ###indicate where the junction is
probe_seq_data = string.join([probeset,new_probeset_seq,junction_seq],'\t')+'\n'
data.write(probe_seq_data)
except KeyError: null=[]
data.close()
print k,x
def reverse_orientation(sequence):
"""reverse the orientation of a sequence (opposite strand)"""
exchange = []
for nucleotide in sequence:
if nucleotide == 'A': nucleotide = 'T'
elif nucleotide == 'T': nucleotide = 'A'
elif nucleotide == 'G': nucleotide = 'C'
elif nucleotide == 'C': nucleotide = 'G'
exchange.append(nucleotide)
complementary_sequence = reverse_string(exchange)
return complementary_sequence
def reverse_string(astring):
"http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65225"
revchars = list(astring) # string -> list of chars
revchars.reverse() # inplace reverse the list
revchars = ''.join(revchars) # list of strings -> string
return revchars
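### A minimal sanity check for the two sequence helpers above (illustrative only;
### this hypothetical function is never called by the pipeline):
def _exampleReverseHelpers():
    assert reverse_string('ATGC') == 'CGTA' ### simple character reversal
    assert reverse_orientation('ATGC') == 'GCAT' ### reverse complement (opposite strand)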
def compareProteinFeatures(protein_ft,neg_coding_seq,pos_coding_seq):
###Parse out ft-information. Generate ft-fragment sequences for querying
###This is a modification of the original script from FeatureAlignment but simplified for exon analysis
protein_ft_unique=[]; new_ft_list = []
for ft_data in protein_ft:
ft_name = ft_data.PrimaryAnnot(); domain_seq = ft_data.DomainSeq(); annotation = ft_data.SecondaryAnnot()
protein_ft_unique.append((ft_name,annotation,domain_seq))
###Redundant entries that are class objects can't be eliminated, so save to a new list and eliminate redundant entries
protein_ft_unique = unique.unique(protein_ft_unique)
for (ft_name,annotation,domain_seq) in protein_ft_unique:
ft_length = len(domain_seq)
new_ft_data = 'null',domain_seq,ft_name,annotation
new_ft_list.append(new_ft_data)
new_ft_list = unique.unique(new_ft_list)
pos_ft = []; neg_ft = []; all_fts = []
for (pos,seq,ft_name,annot) in new_ft_list:
if seq in pos_coding_seq:
pos_ft.append([pos,seq,ft_name,annot]); all_fts.append([pos,seq,ft_name,annot])
if seq in neg_coding_seq:
neg_ft.append([pos,seq,ft_name,annot]); all_fts.append([pos,seq,ft_name,annot])
all_fts = unique.unique(all_fts)
pos_ft_missing=[]; neg_ft_missing=[]
for entry in all_fts:
if entry not in pos_ft: pos_ft_missing.append(entry)
if entry not in neg_ft: neg_ft_missing.append(entry)
pos_ft_missing2=[]; neg_ft_missing2=[]
for entry in pos_ft_missing: entry[1] = ''; pos_ft_missing2.append(entry)
for entry in neg_ft_missing: entry[1] = ''; neg_ft_missing2.append(entry)
pos_ft_missing2 = unique.unique(pos_ft_missing2)
neg_ft_missing2 = unique.unique(neg_ft_missing2)
return neg_ft_missing2,pos_ft_missing2
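### Hypothetical sketch of compareProteinFeatures (not part of the pipeline): a
### feature whose sequence occurs only in the positive isoform is reported as
### missing from the negative isoform, with its sequence blanked to ''.
def _exampleCompareProteinFeatures():
    class _FT: ### stand-in for the feature objects produced by FeatureAlignment
        def PrimaryAnnot(self): return 'Kinase'
        def SecondaryAnnot(self): return ''
        def DomainSeq(self): return 'MKKK'
    neg_missing,pos_missing = compareProteinFeatures([_FT()],'AAAA','ZZMKKKZZ')
    assert neg_missing == [['null','','Kinase','']] and pos_missing == []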
def getFeatureIsoformGenomePositions(species,protein_ft_db,mRNA_protein_seq_db,gene_transcript_db,coordinate_type):
""" Adapted from compareProteinFeatures but for one isoform and returns genomic coordinates for each feature
This function is designed to export all unique isoforms rather than just comparison isoforms """
import export
export_file = 'AltDatabase/ensembl/'+species+'/ProteinFeatureIsoform_complete.txt'
export_data = export.ExportFile(export_file)
failed = 0
worked = 0
failed_ac=[]
for gene in protein_ft_db:
transcript_feature_db={}
for ft in protein_ft_db[gene]:
try:
ft_name = ft.PrimaryAnnot(); annotation = ft.SecondaryAnnot()
for (mRNA,type) in gene_transcript_db[gene]:
try:
protein,protein_seq = mRNA_protein_seq_db[mRNA]
error = False
except Exception:
failed_ac.append(mRNA)
error = True
if error == False:
if ft.DomainSeq() in protein_seq:
#if coordinate_type == 'genomic':
pos1_genomic = ft.GenomicStart(); pos2_genomic = ft.GenomicStop()
#else:
pos1 = str(ft.DomainStart()); pos2 = str(ft.DomainEnd())
### There are often many features that overlap within a transcript, so consistently pick just one
if mRNA in transcript_feature_db:
db = transcript_feature_db[mRNA]
if (pos1,pos2) in db:
db[pos1, pos2].append([pos1_genomic, pos2_genomic, protein,ft_name,annotation])
else:
db[pos1, pos2]=[[pos1_genomic, pos2_genomic, protein,ft_name,annotation]]
else:
db={}
db[pos1, pos2]=[[pos1_genomic, pos2_genomic, protein,ft_name,annotation]]
transcript_feature_db[mRNA] = db
#values = [mRNA, protein, pos1, pos2,ft_name,annotation]; unique_entries.append(values)
worked+=1
except IOError:
failed+=1
for transcript in transcript_feature_db:
db = transcript_feature_db[transcript]
for (pos1,pos2) in db:
db[pos1,pos2].sort() ### Pick the alphabetically listed first feature
pos1_genomic, pos2_genomic, protein,ft_name,annotation = db[pos1,pos2][0]
values = [transcript, protein, pos1, pos2,pos1_genomic, pos2_genomic, ft_name,annotation]
export_data.write(string.join(values,'\t')+'\n')
export_data.close()
print failed,'features failed to have corresponding aligned genomic locations out of', worked+failed
failed_ac = unique.unique(failed_ac)
print len(failed_ac),'mRNAs without identified/in silico derived proteins' ### Appear to be ncRNAs without ATGs
print failed_ac[:20]
def identifyAltIsoformsProteinComp(probeset_gene_db,species,array_type,protein_domain_db,compare_all_features,data_type):
""" This function is used by the module IdentifyAltIsoforms to run 'characterizeProteinLevelExonChanges'"""
global protein_ft_db; protein_ft_db = protein_domain_db; protein_domain_db=[]
exon_db={} ### Create a simplified version of the exon_db dictionary with probesets that map to a match and null protein
for probeset in probeset_gene_db:
gene, exon_id = probeset_gene_db[probeset]
ep = ExonProteinAlignmentData(gene,probeset,exon_id,'',''); exon_db[probeset] = ep
global protein_sequence_db
if compare_all_features == 'yes': type = 'seqcomp'
else: type = 'exoncomp'
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
exon_protein_sequence_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+'SEQUENCE-protein-dbase_'+type+'.txt'
else:
exon_protein_sequence_file = 'AltDatabase/'+species+'/'+array_type+'/'+'SEQUENCE-protein-dbase_'+type+'.txt'
probeset_protein_db,protein_sequence_db = importExonSequenceBuild(exon_protein_sequence_file,exon_db)
exon_hits={}
for probeset in probeset_protein_db:
gene = probeset_protein_db[probeset].GeneID()
exon_hits[gene,probeset]=[]
include_sequences = 'no' ### Sequences for comparisons are unnecessary to store. List the array type as 'exon' since AltMouse data has been re-organized; the AltMouse-specific functionality in this function should eventually be removed
functional_attribute_db,protein_features = characterizeProteinLevelExonChanges(species,exon_hits,probeset_protein_db,'exon',include_sequences)
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-domain-annotations-'+type+'.txt'
else:
export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-'+type+'.txt'
formatAttributeForExport(protein_features,export_file)
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-protein-annotations-'+type+'.txt'
else:
export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-annotations-'+type+'.txt'
formatAttributeForExport(functional_attribute_db,export_file)
def formatAttributeForExport(attribute_db,filename):
from build_scripts import IdentifyAltIsoforms
export_db={}
for (gene,probeset) in attribute_db:
attribute_list = attribute_db[(gene,probeset)]; attribute_list2=[]
for (attribute,direction) in attribute_list:
attribute = string.replace(attribute,'|',' ')
attribute_list2.append(attribute+'|'+direction)
export_db[probeset]=attribute_list2
print 'Exporting:',filename
IdentifyAltIsoforms.exportSimple(export_db,filename,'')
def importTranscriptBiotypeAnnotations(species):
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt'
accepted_biotypes = ['nonsense_mediated_decay','non_coding','retained_intron','retrotransposed']
fn=filepath(filename); biotype_db = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
gene,transcript,biotype = string.split(data,'\t')
if biotype in accepted_biotypes:
biotype_db[transcript] = biotype
return biotype_db
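### Expected input format for the biotype file parsed above (illustrative row):
### ENSMUSG00000000001<tab>ENSMUST00000000001<tab>nonsense_mediated_decay
### Only transcripts annotated with one of the accepted_biotypes are retained.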
def characterizeProteinLevelExonChanges(species,exon_hits,probeset_protein_db,array_type,include_sequences):
"""Examines the the two reciprocal isoforms for overal changes in protein sequence and general sequence composition"""
try: biotype_db = importTranscriptBiotypeAnnotations(species)
except Exception: biotype_db={}
functional_attribute_db={}; protein_features={}
for (array_geneid,uid) in exon_hits: ###uid is probeset or (probeset1,probeset2) value, depending on array_type
if array_type != 'exon' and array_type != 'gene': probeset,probeset2 = uid
else: probeset = uid
hv=0
try:
pp = probeset_protein_db[probeset]; hv=1
pos_ref_AC = pp.HitProteinID()
neg_ref_AC = pp.NullProteinID()
if array_type != 'exon' and array_type != 'gene': ### Instead of using the null_hit, use the second junction probeset
try: np = probeset_protein_db[probeset2]; neg_ref_AC = np.HitProteinID()
except KeyError: null =[] ###just use the existing null
except KeyError: ###occurs if there is no protein associated with the first probeset (NA for exon arrays)
if array_type != 'exon' and array_type != 'gene': ###Reverse of above
try:
np = probeset_protein_db[probeset2]; hv=2
pos_ref_AC = np.NullProteinID()
neg_ref_AC = np.HitProteinID()
except KeyError: hv=0
if hv!=0:
neg_coding_seq = protein_sequence_db[neg_ref_AC]
pos_coding_seq = protein_sequence_db[pos_ref_AC]
pos_biotype=None; neg_biotype=None
if pos_ref_AC in biotype_db: pos_biotype = biotype_db[pos_ref_AC]
if neg_ref_AC in biotype_db: neg_biotype = biotype_db[neg_ref_AC]
neg_length = len(neg_coding_seq)
pos_length = len(pos_coding_seq)
pos_length = float(pos_length); neg_length = float(neg_length)
if array_geneid in protein_ft_db:
protein_ft = protein_ft_db[array_geneid]
neg_ft_missing,pos_ft_missing = compareProteinFeatures(protein_ft,neg_coding_seq,pos_coding_seq)
for (pos,blank,ft_name,annotation) in pos_ft_missing:
call_x = '-'
if len(annotation)>0: data_tuple = ft_name +'-'+annotation,call_x ### the redundant overwritten assignment was removed
else: data_tuple = ft_name,call_x
try: protein_features[array_geneid,uid].append(data_tuple)
except KeyError: protein_features[array_geneid,uid] = [data_tuple]
for (pos,blank,ft_name,annotation) in neg_ft_missing:
###If missing from the negative list, it is present in the positive state
call_x = '+'
if len(annotation)>0: data_tuple = ft_name +'-'+annotation,call_x ### the redundant overwritten assignment was removed
else: data_tuple = ft_name,call_x
try: protein_features[array_geneid,uid].append(data_tuple)
except KeyError: protein_features[array_geneid,uid] = [data_tuple]
call=''
if pos_biotype != None or neg_biotype !=None:
### For example, if one or both transcript(s) are annotated as nonsense_mediated_decay
if (neg_length - pos_length)>0: call = '-'
elif (pos_length - neg_length)>0: call = '+'
else: call = '~'
if pos_biotype != None:
data_tuple = pos_biotype,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]= [data_tuple]
if neg_biotype != None:
data_tuple = neg_biotype,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]= [data_tuple]
if pos_coding_seq[:5] != neg_coding_seq[:5]:
function_var = 'alt-N-terminus'
if (neg_length - pos_length)>0: call = '-'
elif (pos_length - neg_length)>0: call = '+'
else: call = '~'
data_tuple = function_var,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]= [data_tuple]
if pos_coding_seq[-5:] != neg_coding_seq[-5:]:
#print probeset,(1-((neg_length - pos_length)/neg_length)), ((neg_length - pos_length)/neg_length),[neg_length],[pos_length]
#print probeset,(1-((pos_length - neg_length)/pos_length)),((pos_length - neg_length)/pos_length)
if (1-((neg_length - pos_length)/neg_length)) < 0.5 and ((neg_length - pos_length)/neg_length) > 0 and (pos_coding_seq[:5] == neg_coding_seq[:5]):
function_var = 'truncated'
call = '+'; data_tuple = function_var,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]=[data_tuple]
elif (1-((pos_length - neg_length)/pos_length)) < 0.5 and ((pos_length - neg_length)/pos_length) > 0 and (pos_coding_seq[:5] == neg_coding_seq[:5]):
function_var = 'truncated'
call = '-'; data_tuple = function_var,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]=[data_tuple]
else:
function_var = 'alt-C-terminus'
if (neg_length - pos_length)>0: call = '-'
elif (pos_length - neg_length)>0: call = '+'
else: call = '~'
data_tuple = function_var,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]=[data_tuple]
if call == '':
if pos_coding_seq != neg_coding_seq:
function_var = 'alt-coding'
if (neg_length - pos_length)>0: call = '-'
elif (pos_length - neg_length)>0: call = '+'
else: call = '~'
data_tuple = function_var,call
try: functional_attribute_db[array_geneid,uid].append(data_tuple)
except KeyError: functional_attribute_db[array_geneid,uid]= [data_tuple]
### Record change in peptide size
if neg_length > pos_length: fcall = '-'
elif neg_length < pos_length: fcall = '+'
elif neg_length == pos_length: fcall = '~'
if len(pos_ref_AC)<1: pos_ref_AC = 'NULL'
if len(neg_ref_AC)<1: neg_ref_AC = 'NULL'
if fcall == '-':
function_var1 = 'AA:' + str(int(pos_length))+'('+pos_ref_AC+')' +'->'+ str(int(neg_length))+'('+neg_ref_AC+')'
if include_sequences == 'yes': function_var2 = 'sequence: ' +'('+pos_ref_AC+')'+pos_coding_seq +' -> '+ '('+neg_ref_AC+')'+neg_coding_seq
else:
function_var1 = 'AA:' +str(int(neg_length))+ '('+neg_ref_AC+')' +'->'+ str(int(pos_length))+'('+pos_ref_AC+')'
if include_sequences == 'yes': function_var2 = 'sequence: ' +'('+neg_ref_AC+')'+neg_coding_seq +' -> '+'('+pos_ref_AC+')'+pos_coding_seq
data_tuple1 = function_var1,fcall
try: functional_attribute_db[array_geneid,uid].append(data_tuple1)
except KeyError: functional_attribute_db[array_geneid,uid]= [data_tuple1]
### Record sequence change
if include_sequences == 'yes':
data_tuple2 = function_var2,fcall
try: functional_attribute_db[array_geneid,uid].append(data_tuple2)
except KeyError: functional_attribute_db[array_geneid,uid]= [data_tuple2]
print len(functional_attribute_db),'Genes with affected functional attributes'
return functional_attribute_db,protein_features
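### Worked example of the truncation heuristic above (illustrative numbers): with
### neg_length=400.0 and pos_length=150.0, (neg-pos)/neg = 0.625 > 0 and
### 1-0.625 = 0.375 < 0.5, so if the first five N-terminal residues match, the
### positive isoform is annotated as 'truncated' with a '+' call; a milder length
### difference with differing C-termini falls through to 'alt-C-terminus' instead.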
def combine_databases(db1,db2):
for key in db2:
if key not in db1:
db1[key] = db2[key]
return db1
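### Minimal sketch of combine_databases (illustrative, never called by the
### pipeline): keys already present in db1 win and db2 only fills in the gaps.
def _exampleCombineDatabases():
    db1 = {'a':1}; db2 = {'a':99,'b':2}
    assert combine_databases(db1,db2) == {'a':1,'b':2}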
if __name__ == '__main__':
    ### Development test scaffolding (not part of the build pipeline): the sequence
    ### databases must be imported before the example query below can run.
    species = 'Mm'; array_type='AltMouse'
    exon_protein_sequence_file = "AltDatabase/"+species+"/"+array_type+"/"+"SEQUENCE-protein-dbase.txt"
    probeset_protein_db,protein_sequence_db = importExonSequenceBuild(exon_protein_sequence_file,{}) ### assumes an empty exon_db suffices for a smoke test
    protein_ft_db={} ### no protein feature annotations are needed for this quick check
    exon_hits={}
    exon_hits[('ENSG00000130939', 'E9-3|')] = [['E9-3', '2319586']]
    ### call updated to match the five-argument signature of characterizeProteinLevelExonChanges
    functional_attribute_db,protein_features = characterizeProteinLevelExonChanges(species,exon_hits,probeset_protein_db,'exon','no')
    sys.exit()
### ---- End of file: AltAnalyze-2.1.3.15/altanalyze/build_scripts/ExonAnalyze_module.py ----
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import export
import update
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".all" or entry[-5:] == ".data" or entry[-3:] == ".fa":
dir_list2.append(entry)
return dir_list2
def returnDirectories(sub_dir):
dir=os.path.dirname(unique.__file__) ### anchor on an imported module; the original referenced an undefined 'dirfile'
dir_list = os.listdir(dir + sub_dir)
###Below code used to prevent FILE names from being included
dir_list2 = []
for entry in dir_list:
if "." not in entry: dir_list2.append(entry)
return dir_list2
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getEnsemblAnnotations(filename,symbol_ensembl_db):
fn=filepath(filename)
print 'importing', filename
verifyFile(filename,species) ### Makes sure file is local and if not downloads.
for line in open(fn,'r').xreadlines():
data, newline= string.split(line,'\n')
t = string.split(data,'\t'); ensembl=t[0]
try: symbol = t[2]
except IndexError: symbol = ''
if len(symbol)>1 and len(ensembl)>1:
try: symbol_ensembl_db[string.upper(symbol)].append(ensembl)
except KeyError: symbol_ensembl_db[string.upper(symbol)] = [ensembl]
return symbol_ensembl_db
def getCurrentEnsembls(symbol_ensembl_old):
### Get Ensembl gene annotations for the current version of Ensembl only
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
symbol_ensembl_current = getEnsemblAnnotations(filename,{})
all_current_ensembls={}
for symbol in symbol_ensembl_current:
if len(symbol_ensembl_current[symbol][0])<3: print symbol_ensembl_current[symbol][0]
all_current_ensembls[symbol_ensembl_current[symbol][0]] = []
### Augment with UniProt Symbols and Aliases
uniprot_symbol_ensembl = importUniProtAnnotations()
for symbol in uniprot_symbol_ensembl:
ensembls = uniprot_symbol_ensembl[symbol]
if symbol not in symbol_ensembl_current:
for ensembl in ensembls:
if ensembl in all_current_ensembls:
try: symbol_ensembl_current[symbol].append(ensembl)
except Exception: symbol_ensembl_current[symbol] = [ensembl]
for symbol in symbol_ensembl_old:
ensembls = symbol_ensembl_old[symbol]
if symbol not in symbol_ensembl_current:
for ensembl in ensembls:
if ensembl in all_current_ensembls:
try: symbol_ensembl_current[symbol].append(ensembl)
except Exception: symbol_ensembl_current[symbol] = [ensembl]
return symbol_ensembl_current
def processEnsemblAnnotations():
global redundant_ensembl_by_build; redundant_ensembl_by_build={}; symbol_ensembl={}
###This is done for archived gene annotations from Ensembl
filename = 'AltDatabase/miRBS/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
try: symbol_ensembl = getEnsemblAnnotations(filename,symbol_ensembl)
except Exception: symbol_ensembl={}
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
symbol_ensembl_current = getCurrentEnsembls(symbol_ensembl)
for symbol in symbol_ensembl_current:
for ensembl in symbol_ensembl_current[symbol]:
try: symbol_ensembl[symbol].append(ensembl)
except Exception: symbol_ensembl[symbol] = [ensembl]
for symbol in symbol_ensembl:
if len(symbol_ensembl[symbol])>1: ###Thus more than one Ensembl gene ID corresponds to the same symbol... probably due to versioning (which we've seen)
for ensembl in symbol_ensembl[symbol]:
for ensembl2 in symbol_ensembl[symbol]:
if ensembl != ensembl2:
try: redundant_ensembl_by_build[ensembl].append(ensembl2)
except KeyError: redundant_ensembl_by_build[ensembl] = [ensembl2]
print 'len(symbol_ensembl)',len(symbol_ensembl)
for transcript in ens_gene_to_transcript:
if len(ens_gene_to_transcript[transcript])>1:
for ensembl in ens_gene_to_transcript[transcript]:
for ensembl2 in ens_gene_to_transcript[transcript]:
if ensembl != ensembl2:
try: redundant_ensembl_by_build[ensembl].append(ensembl2)
except KeyError: redundant_ensembl_by_build[ensembl] = [ensembl2]
redundant_ensembl_by_build = eliminateRedundant(redundant_ensembl_by_build)
return symbol_ensembl,symbol_ensembl_current
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term in data_dir: matches.append(data_dir)
return matches
class MicroRNATargetData:
def __init__(self,gene,gene_symbol,mir,mir_sequences,source):
self._geneid = gene; self._symbol = gene_symbol; self._mir = mir; self._source = source; self._mir_sequences = mir_sequences
def MicroRNA(self): return self._mir
def GeneID(self): return self._geneid
def Symbol(self): return self._symbol
def Source(self): return self._source
def Sequences(self): return self._mir_sequences
def setScore(self,score): self.score = score
def setCoordinates(self,coord): self.coord = coord
def Score(self): return self.score
def Coordinates(self): return self.coord
def Output(self):
export_line=string.join([self.MicroRNA(),self.GeneID(),self.Sequences(),self.Coordinates()],'\t')+'\n'
return export_line
def Report(self):
output = self.GeneID()
return output
def __repr__(self): return self.Report()
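### Typical construction of the container above (hypothetical values):
### y = MicroRNATargetData('ENSMUSG00000000001','Gnai3','mmu-miR-1','UACAUUCC','pictar')
### Note that Output() also requires setCoordinates() to have been called first,
### as done in the 'coordinates' branch of mirandaImport below.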
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
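### Illustrative behavior of cleanUpLine: the newline, carriage return, the
### literal two-character sequence '\c' and any double quotes are all stripped.
def _exampleCleanUpLine():
    assert cleanUpLine('ENSMUSG00000000001\t"E1.1"\r\n') == 'ENSMUSG00000000001\tE1.1'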
def mirHitImport():
try:
filename = 'AltDatabase/miRBS/'+species+'/'+'hits.txt'
print 'parsing', filename
fn=filepath(filename); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
mir = t[0]; fold = t[1]; mir_hit_db[mir] = fold
except IOError: mir_hit_db={}
def importExpressionData():
global upregulated; global downregulated
try:
filename = 'AltDatabase/miRBS/'+species+'/'+'expression-data.txt'
print 'parsing', filename
fn=filepath(filename); x=0; upregulated={}; downregulated={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene = t[0]; log_fold_change = float(t[-2]); ttest = float(t[-1])
if ttest<0.05 and log_fold_change>1: upregulated[gene] = log_fold_change,ttest
if ttest<0.05 and log_fold_change<-1: downregulated[gene] = log_fold_change,ttest
except IOError: upregulated={}; downregulated={}
def pictarImport(parse_sequences,type,added):
"""Annotations originally from the file: ng1536-S3.xls, posted as supplementary data at:
http://www.nature.com/ng/journal/v37/n5/suppinfo/ng1536_S1.html. The file being parsed here has been pre-matched to Ensembl IDs
using the ExonModule of LinkEST, for human."""
mir_sequences=[]
if species == 'Mm': filename = 'AltDatabase/miRBS/'+species+'/'+'pictar-target-annotated.txt'; tax = '10090'
else: filename = 'AltDatabase/miRBS/'+'Mm'+'/'+'pictar-target-annotated.txt'; tax = '10116'
#if species == 'Hs': filename = 'AltDatabase/miRBS/'+species+'/'+'pictar-conserved-targets-2005.txt'; tax = '9606'
if type == 'pre-computed':
if species == 'Hs': filename = 'AltDatabase/miRBS/'+species+'/'+'pictar-conserved-targets-2005.txt'; tax = '9606'
else:
if species == 'Hs': filename = 'AltDatabase/miRBS/'+'Mm'+'/'+'pictar-target-annotated.txt'; tax = '9606'
import AltAnalyze
###Get taxid annotations from the GO-Elite config
species_annot_db=AltAnalyze.importGOEliteSpeciesInfo(); tax_db={}
for species_full in species_annot_db:
if species==species_annot_db[species_full].SpeciesCode():
tax = species_annot_db[species_full].TaxID()
print 'parsing', filename; count=0
print 'len(symbol_ensembl)', len(symbol_ensembl)
verifyFile(filename,species) ### Makes sure file is local and if not downloads.
fn=filepath(filename); x=1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
if species == 'Hs':
if type == 'pre-computed':
ensembl_geneid, mir, mir_sequences = t; ensembl_geneids = [ensembl_geneid]
else:
symbol=string.upper(t[2]);mir=t[6];mir_sequences=t[11]
if symbol in symbol_ensembl and len(symbol)>0: ensembl_geneids=symbol_ensembl[symbol]
else: ensembl_geneids=['']
elif species == 'Mm':
mm_symbol=string.upper(t[3]);mir=t[6];mir_sequences=t[11]; mir = string.replace(mir,'hsa','mmu')
if mm_symbol in symbol_ensembl and len(mm_symbol)>0: ensembl_geneids=symbol_ensembl[mm_symbol]
else: ensembl_geneids=['']
elif species == 'Rn':
mm_symbol=string.upper(t[3]);mir=t[6];mir_sequences=t[11]; mir = string.replace(mir,'hsa','rno')
if mm_symbol in symbol_ensembl and len(mm_symbol)>0: ensembl_geneids=symbol_ensembl[mm_symbol]
else: ensembl_geneids=['']
else:
mm_symbol=string.upper(t[3]);mir=t[6];mir_sequences=t[11]
if mm_symbol in symbol_ensembl and len(mm_symbol)>0: ensembl_geneids=symbol_ensembl[mm_symbol]
else: ensembl_geneids=['']
for ensembl_geneid in ensembl_geneids:
if len(ensembl_geneid)>1 and (ensembl_geneid,mir) not in added:
if parse_sequences == 'yes':
if (mir,ensembl_geneid) in combined_results:
combined_results[(mir,ensembl_geneid)].append(string.upper(mir_sequences)); count+=1
else:
#if count < 800 and '-125b' in mir: print ensembl_geneid, mir, mm_symbol; count+=1
#elif count>799: kill
y = MicroRNATargetData(ensembl_geneid,'',mir,mir_sequences,'pictar'); count+=1
try: microRNA_target_db[mir].append(y)
except KeyError: microRNA_target_db[mir] = [y]
added[(ensembl_geneid,mir)]=[]
print count, 'miRNA-target relationships added for PicTar'
return added
def sangerImport(parse_sequences):
""""Sanger center (miRBase) sequence was provided as a custom (requested) dump of their v5 target predictions
(http://microrna.sanger.ac.uk/targets/v5/), containing Ensembl gene IDs, microRNA names, and putative target
sequences, specific for either mouse or human. Mouse was requested in late 2005 whereas human in late 2007.
These same annotation files, missing the actual target sequence but containing an ENS transcript and coordinate
locations for that build (allowing seqeunce extraction with the appropriate Ensembl build) exist at:
http://microrna.sanger.ac.uk/cgi-bin/targets/v5/download.pl"""
if species == 'Hs': filename = 'AltDatabase/miRBS/'+species+'/'+'mirbase-v5_homo_sapiens.mirna.txt'; prefix = 'hsa-'
if species == 'Rn': filename = 'AltDatabase/miRBS/'+species+'/'+'sanger_miR_target_predictions.txt'; prefix = 'rno-'
if species == 'Mm': filename = 'AltDatabase/miRBS/'+species+'/'+'sanger_miR_target_predictions.txt'; prefix = 'mmu-'
print 'parsing', filename; count=0
fn=filepath(filename); x=1; mir_sequences=[]
verifyFile(filename,species) ### Makes sure file is local and if not downloads.
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
ensembl_geneids=[]
if species == 'Hs':
try:
mir = t[1]; ens_transcript = t[2]; ensembl_geneid = t[17]; mir_sequences = string.upper(t[14])
ensembl_geneids.append(ensembl_geneid)
except IndexError: print line;kill ### 'kill' is undefined: deliberately crashes on a malformed line
elif species == 'Mm':
ens_transcript,mir,mir_sequences = t
if ens_transcript in ens_gene_to_transcript:
ensembl_geneids = ens_gene_to_transcript[ens_transcript]; ensembl_geneid = ensembl_geneids[0]
elif species == 'Rn':
ensembl_geneid,mir,mir_sequences = t
mir_sequences = string.lower(mir_sequences); mir = string.replace(mir,'hsa','rno'); mir = string.replace(mir,'mmu','rno')
ensembl_geneids=[ensembl_geneid]
geneid_ls=[]
#mir_sequences = string.replace(mir_sequences,'-',''); mir_sequences = string.replace(mir_sequences,'=','')
#mir_sequences = string.upper(mir_sequences)
#if 'GGCTCCTGTCACCTGGGTCCGT' in mir_sequences:
#print ensembl_geneid, mir; sys.exit()
for ensembl_geneid in ensembl_geneids:
if ensembl_geneid in redundant_ensembl_by_build: ###Thus there are redundant geneids
geneid_ls += redundant_ensembl_by_build[ensembl_geneid]+[ensembl_geneid]
else: geneid_ls += [ensembl_geneid]
if species == 'Hs':
if ens_transcript in ens_gene_to_transcript: geneid_ls+= ens_gene_to_transcript[ens_transcript]
geneid_ls = unique.unique(geneid_ls)
if len(geneid_ls) == 1 and geneid_ls[0]=='': null =[] ###not a valid gene
elif prefix in mir:
for ensembl_geneid in geneid_ls:
if parse_sequences == 'yes':
if (mir,ensembl_geneid) in combined_results:
mir_sequences = string.replace(mir_sequences,'-',''); mir_sequences = string.replace(mir_sequences,'=',''); count+=1
combined_results[(mir,ensembl_geneid)].append(string.upper(mir_sequences))
else:
if prefix in mir:
y = MicroRNATargetData(ensembl_geneid,'',mir,mir_sequences,'mirbase'); count+=1
try: microRNA_target_db[mir].append(y)
except KeyError: microRNA_target_db[mir] = [y]
print count, 'miRNA-target relationships added for mirbase'
def downloadFile(file_type):
import UI
file_location_defaults = UI.importDefaultFileLocations()
try:
fld = file_location_defaults[file_type]
url = fld.Location()
except Exception:
for fl in fld:
if species in fl.Species(): url = fl.Location()
if 'Target' in file_type: output_dir = 'AltDatabase/miRBS/'
else: output_dir = 'AltDatabase/miRBS/'+species + '/'
gz_filepath, status = update.download(url,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except Exception: status = status
filename = string.replace(gz_filepath,'.zip','.txt')
filename = string.replace(filename,'.gz','.txt')
filename = string.replace(filename,'.txt.txt','.txt')
return filename
def verifyExternalDownload(file_type):
### Adapted from the download function - downloadFile()
import UI
file_location_defaults = UI.importDefaultFileLocations()
try:
fld = file_location_defaults[file_type]
url = fld.Location()
except Exception:
for fl in fld:
if species in fl.Species(): url = fl.Location()
if 'Target' in file_type: output_dir = 'AltDatabase/miRBS/'
else: output_dir = 'AltDatabase/miRBS/'+species + '/'
filename = url.split('/')[-1]
output_filepath = filepath(output_dir+filename)
filename = string.replace(output_filepath,'.zip','.txt')
filename = string.replace(filename,'.gz','.txt')
filename = string.replace(filename,'.txt.txt','.txt')
counts = verifyFile(filename,'counts')
if counts < 9: validated = 'no'
else: validated = 'yes'
return validated, filename
def TargetScanImport(parse_sequences,force):
"""The TargetScan data is currently extracted from a cross-species conserved family file. This file only contains
gene symbol, microRNA name and 3'UTR seed locations."""
if species == 'Mm': tax = '10090'; prefix = 'mmu-'
elif species == 'Hs': tax = '9606'; prefix = 'hsa-'
elif species == 'Rn': tax = '10116'; prefix = 'rno-'
else: prefix = 'hsa-'
import AltAnalyze
###Get taxid annotations from the GO-Elite config
species_annot_db=AltAnalyze.importGOEliteSpeciesInfo(); tax_db={}
for species_full in species_annot_db:
if species==species_annot_db[species_full].SpeciesCode():
tax = species_annot_db[species_full].TaxID()
global l
### See if the files are already there
verifyTSG, target_scan_target_file = verifyExternalDownload('TargetScanGenes')
verifyTSS, target_scan_sequence_file = verifyExternalDownload('TargetScanSequences')
if verifyTSG == 'no' or verifyTSS == 'no': ### used to be - if force == 'yes'
if parse_sequences == 'no':
### Then download the latest annotations and sequences
target_scan_target_file = downloadFile('TargetScanGenes')
target_scan_sequence_file = downloadFile('TargetScanSequences')
### Cross-species TargetScan file with UTR sequences for all genes with reported targets in the conserved family file.
### Although this file includes valid sequence data that appears to match up to the target file, the target file
### appears to only list the seed sequence location (UTR start and stop) and not the full binding sequence and thus
### is not amenable to probe set alignment.
print 'parsing', target_scan_sequence_file
fn=filepath(target_scan_sequence_file); x=0; target_scan_gene_utr_seq={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
symbol = string.upper(t[2]); tax_id = t[3]; utr_seq = t[4]
if tax_id == tax:
utr_seq_no_gaps = string.replace(utr_seq,'-','')
utr_seq_no_gaps = string.replace(utr_seq_no_gaps,'U','T')
if symbol in symbol_ensembl_current and len(utr_seq_no_gaps)>0:
target_scan_gene_utr_seq[symbol] = utr_seq_no_gaps
print 'UTR sequence for',len(target_scan_gene_utr_seq),'TargetScan genes stored in memory.'
mir_sequences = []; count=0
print 'parsing', target_scan_target_file
#verifyFile(target_scan_target_file,species) ### Makes sure file is local and if not downloads.
fn=filepath(target_scan_target_file); x=0; k=[]; l=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
x=1
data = string.lower(data)
t = string.split(data,'\t')
i=0
for value in t:
if 'mir' in value: m = i
elif 'gene id' in value: g = i
elif 'gene symbol' in value: s = i
elif 'transcript' in value: r = i
elif 'species id' in value: txi = i
elif 'utr start' in value: us = i
elif 'utr end' in value: ue = i
i+=1
else:
mir = t[m]; geneid = t[g]; gene_symbol = string.upper(t[s]); taxid = t[txi]; utr_start = int(t[us]); utr_end = int(t[ue])
### Old format
#mir = t[0]; gene_symbol = string.upper(t[1]); taxid = t[2]; utr_start = t[3]; utr_end = t[4]
if '/' in mir:
mir_list=[]
mirs = string.split(mir,'/')
for mirid in mirs[1:]:
mirid = 'miR-'+mirid
mir_list.append(mirid)
mir_list.append(mirs[0])
else: mir_list = [mir]
if taxid == tax: ###human
#target_scan_gene_utr_seq[symbol] = utr_seq_no_gaps
if gene_symbol in symbol_ensembl_current: ensembl_geneids = symbol_ensembl_current[gene_symbol]; proceed = 'yes'; k.append(gene_symbol)
else: proceed = 'no'; l.append(gene_symbol)
if gene_symbol in target_scan_gene_utr_seq:
### TargetScan provides the core, while processed miRs are typically 22nt - seems to approximate other databases better
adj_start = utr_start-15
if adj_start < 0: adj_start=0
mir_sequences = target_scan_gene_utr_seq[gene_symbol][adj_start:utr_end+1]
#if string.lower(gene_symbol) == 'tns3' and mir == 'miR-182': print mir,gene_symbol,taxid,utr_start,utr_end,mir_sequences
else: mir_sequences=[]
###Multiple gene IDs are already associated with each symbol, so redundancy is not a concern here
if proceed == 'yes':
for ensembl_geneid in ensembl_geneids:
for mir in mir_list:
#if ensembl_geneid == 'ENSG00000137815' and mir == 'miR-214': print mir,gene_symbol,taxid,utr_start,utr_end,mir_sequences,target_scan_gene_utr_seq[gene_symbol];sys.exit()
if parse_sequences == 'yes':
if (prefix+mir,ensembl_geneid) in combined_results:
combined_results[(prefix+mir,ensembl_geneid)].append(mir_sequences); count+=1
else:
#if ensembl_geneid == 'ENSMUSG00000029467': print mir
y = MicroRNATargetData(ensembl_geneid,gene_symbol,prefix+mir,mir_sequences,'TargetScan') ### argument order fixed to match the constructor (miR name before target sequence)
count+=1
try: microRNA_target_db[prefix+mir].append(y)
except KeyError: microRNA_target_db[prefix+mir] = [y]
k = unique.unique(k); l = unique.unique(l)
print 'ensembls-found:',len(k),', not found:',len(l)
print l[:10]
print count, 'miRNA-target relationships added for TargetScan'
def mirandaImport(parse_sequences,force):
"""Miranda data is avaialble from two file types from different websites. The first is human-centric with multi-species
target alignment information organized by Ensembl gene ID (http://cbio.mskcc.org/research/sander/data/miRNA2003/mammalian/index.html).
A larger set of associations was also pulled from species specific files (http://www.microrna.org/microrna/getDownloads.do),
where gene symbol was related to Ensembl gene. Both files provided target microRNA sequence."""
### Then download the latest annotations and sequences
if parse_sequences == 'coordinates':
export_object = export.ExportFile('miRanda/'+species+'/miRanda.txt')
print "Exporting to:"+'miRanda/'+species+'/miRanda.txt'
verify, filename = verifyExternalDownload('miRanda')
if verify == 'no': filename = downloadFile('miRanda')
print 'parsing', filename; count=0; null_count=[]
fn=filepath(filename); x=1; mir_sequences=[]
verifyFile(filename,species) ### Makes sure file is local and if not downloads.
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
symbol = string.upper(t[3]); mir = t[1]; entrez_gene = t[2]; mir_sequences = string.upper(t[8])
mir_sequences = string.replace(mir_sequences,'-',''); mir_sequences = string.replace(mir_sequences,'=','')
mir_sequences = string.replace(mir_sequences,'U','T')
#if 'GGCTCCTGTCACCTGGGTCCGT' in mir_sequences:
#print symbol, mir; sys.exit()
ensembl_gene_ids = []
if symbol in symbol_ensembl_current:
ensembl_gene_ids = symbol_ensembl_current[symbol]
else: ensembl_gene_ids=[]; null_count.append(symbol)
if len(ensembl_gene_ids) > 0:
for ensembl_geneid in ensembl_gene_ids:
if parse_sequences == 'yes':
if (mir,ensembl_geneid) in combined_results:
combined_results[(mir,ensembl_geneid)].append(string.upper(mir_sequences)); count+=1
else:
y = MicroRNATargetData(ensembl_geneid,'',mir,mir_sequences,'miRanda'); count+=1
try: microRNA_target_db[mir].append(y)
except KeyError: microRNA_target_db[mir] = [y]
if parse_sequences == 'coordinates':
"""
genome_coord = string.split(t[13],':')[1:]; chr = 'chr'+ genome_coord[0]
strand = genome_coord[-1]; start, stop = string.split(genome_coord[1],'-')
"""
genome_coord = t[13][1:-1]
align_score = t[15]
y.setScore(align_score); y.setCoordinates(genome_coord)
export_object.write(y.Output())
print count, 'miRNA-target relationships added for miRanda'
null_count = unique.unique(null_count)
print len(null_count), 'missing symbols',null_count[:10]
if parse_sequences == 'coordinates': export_object.close()
def importEnsTranscriptAssociations(ens_gene_to_transcript,type):
###This function is used to extract out EnsExon to EnsTranscript relationships to find out directly
###which probesets associate with which transcripts and then which proteins
if type == 'current':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
else: filename = 'AltDatabase/miRBS/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
print 'parsing', filename
fn=filepath(filename)
verifyFile(filename,species)
for line in open(fn,'rU').readlines():
data,null = string.split(line,'\n')
t = string.split(data,'\t')
ens_geneid,chr,strand,exon_start,exon_end,ens_exonid,constitutive_exon,ens_transcript_id = t
try: ens_gene_to_transcript[ens_transcript_id].append(ens_geneid)
except KeyError:ens_gene_to_transcript[ens_transcript_id] = [ens_geneid]
ens_gene_to_transcript = eliminateRedundant(ens_gene_to_transcript)
print 'len(ens_gene_to_transcript)',len(ens_gene_to_transcript)
return ens_gene_to_transcript
def findMirTargetOverlaps():
print 'Number of microRNAs to be analyzed:',len(mir_hit_db)
print "Number of microRNAs which have predicted targets:", len(microRNA_target_db)
combined_output = 'AltDatabase/'+species+'/SequenceData/'+'miRBS-combined_gene-targets.txt'
data1 = export.ExportFile(combined_output)
mir_gene_mult_hits={}
for mir in microRNA_target_db:
if mir in mir_hit_db:
output_file = 'AltDatabase/'+species+'/SequenceData/'+mir+'_gene-targets.txt'; output_file = string.replace(output_file,'*','-')
data = export.ExportFile(output_file); delete = {}
title = ['TargetID','Evidence']; title = string.join(title,'\t')+'\n'; data.write(title)
matches=[]; source_mir_db = {}; hit=0
source_db={}
for tg in microRNA_target_db[mir]:
try:
try: source_db[tg.GeneID()].append(tg.Source())
except KeyError: source_db[tg.GeneID()] = [tg.Source()]
except TypeError: print tg.Source(),tg.GeneID();kill ### 'kill' is undefined: deliberate crash for debugging
source_db = eliminateRedundant(source_db)
for gene in source_db:
for source in source_db[gene]: source_mir_db[source]=[]
sources = string.join(source_db[gene],'|')
y = MicroRNATargetData(gene,'',mir,'',sources)
if mir in mir_hit_db:
try: mir_gene_mult_hits[mir].append(y)
except KeyError: mir_gene_mult_hits[mir] = [y]
else: delete[mir] = []
values = [gene,sources]; values = string.join(values,'\t')+'\n'
values2 = [mir,gene,sources]; values2 = string.join(values2,'\t')+'\n'
if len(source_db[gene])>1:
matches.append(gene)
if mir in mir_hit_db: data.write(values); hit+=1
data1.write(values2)
if mir in mir_hit_db:
print len(source_db),'genes associated with',mir+'.',len(source_mir_db),'sources linked to gene. Targets with more than one associated Dbase:', len(matches)
if mir in mir_hit_db:
data.close()
if len(source_mir_db)<4 or hit<5:
try: del mir_gene_mult_hits[mir]
except KeyError: null = []
os.remove(filepath(output_file)) ### remove the sparsely-populated file just written ('fn' was undefined here)
data1.close()
for mir in mir_gene_mult_hits:
mir_fold = mir_hit_db[mir]
if mir_fold>0: mir_direction = 'up'
else: mir_direction = 'down'
output_file = 'AltDatabase/regulated/'+mir+'_regulated-gene-targets.txt'; output_file = string.replace(output_file,'*','-')
fn=filepath(output_file);data = open(fn,'w')
title = ['TargetID','Evidence','CSd40_vs_ESCs-log_fold','CSd40_vs_ESCs-ttest']; title = string.join(title,'\t')+'\n'; data.write(title)
hit = 0
if mir in mir_gene_mult_hits:
for y in mir_gene_mult_hits[mir]:
gene = y.GeneID(); sources = y.Source()
if mir_direction == 'up': ###thus look for down-regulated targets
if gene in downregulated:
hit +=1
log_fold,ttest = downregulated[gene]
values = [gene,sources,str(log_fold),str(ttest)]; values = string.join(values,'\t')+'\n';data.write(values)
if mir_direction == 'down': ###thus look for up-regulated targets
if gene in upregulated:
log_fold,ttest = upregulated[gene]
values = [gene,sources,str(log_fold),str(ttest)]; values = string.join(values,'\t')+'\n';data.write(values)
print hit,'regulated target genes associated with',mir
data.close()
if hit<5: os.remove(fn)
def eliminateRedundant(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
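### Sketch of eliminateRedundant (illustrative): duplicate values for each key
### are collapsed and the surviving values sorted.
def _exampleEliminateRedundant():
    db = {'mmu-miR-1': ['ENSMUSG2','ENSMUSG1','ENSMUSG2']}
    assert eliminateRedundant(db) == {'mmu-miR-1': ['ENSMUSG1','ENSMUSG2']}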
def importUniProtAnnotations():
#g = GrabFiles(); g.setdirectory('/AltDatabase/miRBS/'+species); filenames = g.searchdirectory('UniProt')
filename = 'AltDatabase/uniprot/'+species+'/uniprot_sequence.txt'
fn=filepath(filename); uniprot_symbol_ensembl={}
for line in open(fn,'rU').readlines():
data,null = string.split(line,'\n') #remove endline
t = string.split(data,'\t') #remove endline
symbols = t[3]; ###ASPN; ORFNames=RP11-77D6.3-002, hCG_1784540
symbols = string.split(symbols,';'); primary_symbol = symbols[0]
if len(symbols)>1:
null,symbols = string.split(symbols[1],'=')
symbols = string.split(symbols,', '); symbols = symbols+[primary_symbol]
else: symbols = [primary_symbol]
ensembl_ids = t[4]; ensembl_ids = string.split(ensembl_ids,",")
for symbol in symbols:
if len(symbol)>0:
if len(ensembl_ids)>0: uniprot_symbol_ensembl[string.upper(symbol)] = ensembl_ids #; print symbol,ensembl_ids;kil
return uniprot_symbol_ensembl
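### Worked example of the symbol-field parsing above: for the annotation
### 'ASPN; ORFNames=RP11-77D6.3-002, hCG_1784540' the aliases after '=' are split
### on ', ' and combined with the primary symbol, yielding
### ['RP11-77D6.3-002', 'hCG_1784540', 'ASPN'], each mapped to the Ensembl IDs.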
def exportCombinedMirResultSequences():
combined_input = 'AltDatabase/'+species+'/SequenceData/'+'miRBS-combined_gene-targets.txt'
parse_sequences = 'yes'; global combined_results; combined_results={}
fn=filepath(combined_input); combined_results2={} ###the second is for storing source information
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n') #remove endline
t = string.split(data,'\t') #remove endline
mir, gene, sources = t
combined_results[mir,gene] = []
combined_results2[mir,gene] = sources
microRNA_target_db = {}; mir_hit_db = {}
try: importmiRNAMap(parse_sequences,force) ### 'force' is the module-level flag set by the caller ('Force' was undefined here)
except Exception: null=[] ### occurs when species is not supported
try: mirandaImport(parse_sequences,'yes')
except Exception: null=[]
try: TargetScanImport(parse_sequences,'yes')
except Exception: null=[]
try: sangerImport(parse_sequences)
except Exception: null=[]
try: added = pictarImport(parse_sequences,'pre-computed',{})
except Exception: null=[]
try: added = pictarImport(parse_sequences,'symbol-based',added)
except Exception: null=[]
output_file = 'AltDatabase/'+species+'/SequenceData/'+'miRBS-combined_gene-target-sequences.txt'
combined_results = eliminateRedundant(combined_results); fn=filepath(output_file);data = open(fn,'w')
for (mir,gene) in combined_results:
sequences = combined_results[(mir,gene)]
sources = combined_results2[(mir,gene)]
sequences = string.join(sequences,'|')
sequences = string.replace(sequences,' ','|') ### Seems to occur with PicTar
data.write(mir+'\t'+gene+'\t'+sequences+'\t'+sources+'\n')
data.close()
def importmiRNAMap(parse_sequences,force):
""" Added in AltAnalyze version 2.0, this database provides target sequences for several species and different databases,
including miRanda, RNAhybrid and TargetScan. For more information see: http://mirnamap.mbc.nctu.edu.tw/html/about.html"""
gz_filepath = verifyFileAdvanced('miRNA_targets_',species)
if force == 'yes' or len(gz_filepath)==0:
import UI; species_names = UI.getSpeciesInfo()
species_full = species_names[species]
species_full = string.replace(species_full,' ','_')
miRNAMap_dir = update.getFTPData('mirnamap.mbc.nctu.edu.tw','/miRNAMap2/miRNA_Targets/'+species_full,'.txt.tar.gz')
output_dir = 'AltDatabase/miRBS/'+species+'/'
gz_filepath, status = update.download(miRNAMap_dir,output_dir,'')
if status == 'not-removed':
try: os.remove(gz_filepath) ### Not sure why this works now and not before
except OSError: status = status
fn=filepath(string.replace(gz_filepath,'.tar.gz','')); x=0; count=0
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
try:
miRNA, ensembl_transcript_id, target_start, target_end, miRNA_seq, alignment, target_seq, algorithm, c1, c2, c3 = t
#if 'GGCTCCTGTCACCTGGGTCCGT'in target_seq:
#print 'a'; sys.exit()
#if 'TCF7L1' in symbol or 'TCF3' in symbol:
#if '-422a' in miRNA:
#print miRNA;sys.exit()
#print symbol, mir; sys.exit()
if ensembl_transcript_id in ens_gene_to_transcript:
geneids = ens_gene_to_transcript[ensembl_transcript_id]
target_seq = string.upper(string.replace(target_seq,'-',''))
target_seq = string.replace(target_seq,'U','T')
for ensembl_geneid in geneids:
if parse_sequences == 'yes':
if (miRNA,ensembl_geneid) in combined_results:
combined_results[(miRNA,ensembl_geneid)].append(target_seq)
else:
y = MicroRNATargetData(ensembl_geneid,'',miRNA,target_seq,algorithm); count+=1
try: microRNA_target_db[miRNA].append(y)
except KeyError: microRNA_target_db[miRNA] = [y]
except Exception: x=1 ### Bad formatting
print count, 'miRNA-target relationships added for mirnamap'
return count
def exportMiRandaPredictionsOnly(Species,Force,Only_add_sequence_to_previous_results):
global species; global only_add_sequence_to_previous_results; global symbol_ensembl; global force
global ens_gene_to_transcript; global microRNA_target_db; global mir_hit_db; global parse_sequences
global symbol_ensembl_current; combined_results={}
species = Species; compare_to_user_data = 'no'; force = Force
only_add_sequence_to_previous_results = Only_add_sequence_to_previous_results
try: ens_gene_to_transcript = importEnsTranscriptAssociations({},'archive')
except Exception: ens_gene_to_transcript={} ### Archived file not on server for this species
ens_gene_to_transcript = importEnsTranscriptAssociations(ens_gene_to_transcript,'current')
symbol_ensembl,symbol_ensembl_current = processEnsemblAnnotations()
microRNA_target_db = {}; mir_hit_db = {}
if only_add_sequence_to_previous_results != 'yes':
parse_sequences = 'coordinates'
try: del symbol_ensembl['']
except KeyError: null=[]
try: mirandaImport(parse_sequences,'no')
except Exception: pass
def runProgram(Species,Force,Only_add_sequence_to_previous_results):
global species; global only_add_sequence_to_previous_results; global symbol_ensembl; global force
global ens_gene_to_transcript; global microRNA_target_db; global mir_hit_db; global parse_sequences
global symbol_ensembl_current; combined_results={}
species = Species; compare_to_user_data = 'no'; force = Force
only_add_sequence_to_previous_results = Only_add_sequence_to_previous_results
try: ens_gene_to_transcript = importEnsTranscriptAssociations({},'archive')
except Exception: ens_gene_to_transcript={} ### Archived file not on server for this species
ens_gene_to_transcript = importEnsTranscriptAssociations(ens_gene_to_transcript,'current')
symbol_ensembl,symbol_ensembl_current = processEnsemblAnnotations()
microRNA_target_db = {}; mir_hit_db = {}
if only_add_sequence_to_previous_results != 'yes':
parse_sequences = 'no'
try: del symbol_ensembl['']
except KeyError: pass
try: sangerImport(parse_sequences)
except Exception: print '\nsangerImport import failed...\n'
try: TargetScanImport(parse_sequences,'yes')
except Exception,e: print e,'\nTargetScan import failed...\n'
try: importmiRNAMap('no',Force)
except Exception: print '\nmiRNAMap import failed...\n'
try: mirandaImport(parse_sequences,'yes')
except Exception: print '\nmiranda import failed...\n'
try: added = pictarImport(parse_sequences,'pre-computed',{})
except Exception: print '\npictar pre-computed import failed...\n'
try: added = pictarImport(parse_sequences,'symbol-based',added)
except Exception: print '\npictar symbol-based import failed...\n'
if compare_to_user_data == 'yes': importExpressionData(); mirHitImport()
findMirTargetOverlaps(); exportCombinedMirResultSequences()
else: exportCombinedMirResultSequences()
def verifyFile(filename,species_name):
fn=filepath(filename); counts=0
try:
for line in open(fn,'rU').xreadlines():
counts+=1
if counts>10: break
except Exception:
counts=0
if species_name == 'counts': ### Used if the file cannot be downloaded from http://www.altanalyze.org
return counts
elif counts == 0:
if species_name in filename: server_folder = species_name ### Folder equals species unless it is a universal file
elif 'Mm' in filename: server_folder = 'Mm' ### For PicTar
else: server_folder = 'all'
print 'Downloading:',server_folder,filename
update.downloadCurrentVersion(filename,server_folder,'txt')
else:
return counts
def verifyFileAdvanced(fileprefix,species):
g = GrabFiles(); g.setdirectory('/AltDatabase/miRBS/'+species)
try: filename = g.searchdirectory(fileprefix)[0]
except Exception: filename=[]
if '.' not in filename: filename=[]
return filename
def reformatGeneToMiR(species,type):
### Import and re-format the miRNA-gene annotation file for use with GO-Elite (too big to do in Excel)
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_microRNA-Ensembl.txt'
fn=filepath(filename); reformatted=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
miR,ens,source = string.split(data,'\t')
if type == 'strict' and '|' in source: ### Only include predictions with evidence from > 1 algorithm (miRanda, mirbase, TargetScan, pictar or RNAhybrid)
reformatted.append([ens,'En',miR])
elif type == 'lax':
reformatted.append([ens,'En',miR])
if len(reformatted)>10: ### Ensure there are sufficient predictions for over-representation for that species
filename = string.replace(filename,'.txt','-GOElite_lax.txt')
if type == 'strict':
filename = string.replace(filename,'lax','strict')
data = export.ExportFile(filename)
### Make title row
headers=['GeneID','SystemCode','Pathway']
headers = string.join(headers,'\t')+'\n'; data.write(headers)
for values in reformatted:
values = string.join(values,'\t')+'\n'; data.write(values)
data.close()
print filename,'exported...'
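### Example GO-Elite row produced above (illustrative values):
### ENSMUSG00000000001<tab>En<tab>mmu-miR-1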
def reformatAllSpeciesMiRAnnotations():
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
for species in existing_species_dirs:
try:
reformatGeneToMiR(species,'lax')
reformatGeneToMiR(species,'strict')
except Exception: null=[] ### Occurs for non-species directories
def copyReformattedSpeciesMiRAnnotations(type):
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
for species in existing_species_dirs:
try:
ir = filepath('AltDatabase/ensembl/'+species+'/'+species+'_microRNA-Ensembl-GOElite_'+type+'.txt')
er = filepath('GOElite/'+species+'_microRNA-Ensembl-GOElite_'+type+'.txt')
export.copyFile(ir, er)
print 'Copied miRNA-target database for:',species, type
except Exception: null=[] ### Occurs for non-species directories
def reformatDomainAssocations(species):
filename = 'AltDatabase/'+species+'/RNASeq/probeset-domain-annotations-exoncomp.txt'
fn=filepath(filename); gene_domain_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ens_gene = string.split(t[0],':')[0]
for i in t[1:]:
domain = string.split(i,'|')[0]
if 'IPR' in domain:
domain = string.split(domain,'-IPR')[0] +'(IPR'+ string.split(domain,'-IPR')[1]+')'
else:
domain+='(UniProt)'
try: gene_domain_db[ens_gene].append(domain)
except Exception: gene_domain_db[ens_gene] = [domain]
gene_domain_db = eliminateRedundant(gene_domain_db)
export_file = 'GOElite/'+species+'_Ensembl-Domain.txt'
export_data = export.ExportFile(export_file)
export_data.write('Ensembl\tEn\tDomain\n')
for gene in gene_domain_db:
for domain in gene_domain_db[gene]:
export_data.write(gene+'\tEn\t'+domain+'\n')
export_data.close()
print 'zipping',export_file
import gzip
f_in = open(filepath(export_file), 'rb')
f_out = gzip.open(filepath(export_file)[:-4]+'.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
os.remove(filepath(export_file))
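### Illustrative transformation performed above: an input annotation such as
### 'Pkinase-IPR000719|+' is reduced to its domain token and reformatted as
### 'Pkinase(IPR000719)', while a domain without an InterPro ID is suffixed as
### 'SomeDomain(UniProt)'.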
def exportGOEliteGeneSets(type):
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
for species in existing_species_dirs:
if len(species)<4:
try:
    reformatGeneToMiR(species,type)
    reformatDomainAssocations(species)
except Exception: null=[] ### Occurs for non-species directories; a duplicated unguarded call was removed
copyReformattedSpeciesMiRAnnotations(type)
if __name__ == '__main__':
exportGOEliteGeneSets('lax');sys.exit()
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
for species in existing_species_dirs:
if len(species)<4: reformatDomainAssocations(species)
#exportMiRandaPredictionsOnly('Hs','no','no');sys.exit()
#runProgram(species,force,'no'); sys.exit()
### ---- End of file: AltAnalyze-2.1.3.15/altanalyze/build_scripts/MatchMiRTargetPredictions.py ----
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import update
import export
import math
from build_scripts import EnsemblImport; reload(EnsemblImport)
from build_scripts import ExonArrayAffyRules
from build_scripts import SubGeneViewerExport
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
################# Parse and Analyze Files
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def grabRNAIdentifiers(mrna_assignment):
ensembl_ids=[]; mRNA_ids=[]; mRNA_entries = string.split(mrna_assignment,' /// ')
for entry in mRNA_entries:
mRNA_info = string.split(entry,' // '); mrna_ac = mRNA_info[0]
if 'GENSCAN' not in mrna_ac:
if 'ENS' in mrna_ac: ensembl_ids.append(mrna_ac)
else:
try: int(mrna_ac[-3:]); mRNA_ids.append(mrna_ac)
except ValueError: continue
ensembl_ids = unique.unique(ensembl_ids); mRNA_ids = unique.unique(mRNA_ids)
return ensembl_ids, mRNA_ids
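### Sketch of grabRNAIdentifiers on a hypothetical Affymetrix mrna_assignment
### string (illustrative only; GENSCAN predictions are ignored):
def _exampleGrabRNAIdentifiers():
    assignment = 'ENSMUST00000000001 // d // 1 /// NM_027855 // d // 1 /// GENSCAN00000000001 // d // 1'
    ensembl_ids, mRNA_ids = grabRNAIdentifiers(assignment)
    assert ensembl_ids == ['ENSMUST00000000001'] and mRNA_ids == ['NM_027855']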
def getProbesetAssociations(filename,ensembl_exon_db,ens_transcript_db,source_biotype):
#probeset_db[probeset] = affygene,exons,probe_type_call,ensembl
fn=filepath(filename); global transcript_cluster_data; global exon_location; global trans_annotation_db
probe_association_db={}; transcript_cluster_data = {}; trans_annotation_db = {}
print "Begin reading",filename; entries=0
exon_location={}
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n')
data = string.replace(data,'"',''); y = 0
t = string.split(data,',')
if data[0] != '#' and 'probeset_id' in data:
affy_headers = t
            index = 0
            while index < len(affy_headers):
if 'probeset_id' == affy_headers[index]: pi = index
if 'start' == affy_headers[index]: st = index
if 'stop' == affy_headers[index]: sp = index
if 'level' == affy_headers[index]: lv = index
if 'exon_id' == affy_headers[index]: ei = index
if 'transcript_cluster_id' == affy_headers[index]: tc = index
if 'seqname' == affy_headers[index]: sn = index
if 'strand' == affy_headers[index]: sd = index
if 'mrna_assignment' == affy_headers[index]: ma = index
#if 'fl' == affy_headers[index]: fn = index
#if 'mrna' == affy_headers[index]: mr = index
#if 'est' == affy_headers[index]: es = index
#if 'ensGene' == affy_headers[index]: eg = index
index += 1
elif data[0] != '#' and 'probeset_id' not in data:
try:
entries+=1
try: probeset_id=int(t[pi]);transcript_cluster_id=int(t[tc]); chr=t[sn];strand=t[sd]
except Exception: print affy_headers; print index; sys.exit()
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
start=int(t[st]);stop=int(t[sp]); exon_type=t[lv]; #fl=int(t[fn]); mRNA=int(t[mr]); est=int(t[es]); ensembl=int(t[eg])
continue_analysis = 'no'
if transcript_cluster_id not in trans_annotation_db:
mrna_assignment = t[ma]; ens_list=[]
if len(mrna_assignment)>4:
ensembl_data = string.split(mrna_assignment,' /// ')
for entry in ensembl_data:
if 'ENS' == entry[:3]:
ens_entry = string.split(entry,' // ')
if ens_entry[0] in ens_transcript_db: ens_list.append(ens_transcript_db[ens_entry[0]][0])
if len(ens_list)>0:
ens_list = unique.unique(ens_list)
trans_annotation_db[transcript_cluster_id] = ens_list
if source_biotype == 'ncRNA':
### transcript_cluster_ids are only informative for looking at mRNA encoding genes (can combine diff. ncRNAs in diff. introns of the same gene)
transcript_cluster_id = probeset_id
if test=='yes': ###used to test the program for a single gene
if transcript_cluster_id in test_cluster: continue_analysis='yes'
else: continue_analysis='yes'
if continue_analysis=='yes':
try: exon_location[transcript_cluster_id,chr,strand].append((start,stop,exon_type,probeset_id))
except KeyError: exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
                    ###Assign constitutive information on a per-probeset basis rather than per gene (unlike transcript_cluster-centric analyses)
const_info = [transcript_cluster_id] #[ensembl,fl,est,probeset_id,transcript_cluster_id]
transcript_cluster_data[probeset_id] = const_info
except ValueError:
continue ###Control probeset with no exon information
for key in exon_location: exon_location[key].sort()
if test=='yes':
for i in exon_location:
print 'exon_location ',i
for e in exon_location[i]: print e
print entries,"entries in input file"
print len(transcript_cluster_data),"probesets,", len(exon_location),"transcript clusters"
print "Matching up ensembl and ExonArray probesets based on chromosomal location..."
    ###Re-organize ensembl and probeset databases based on gene position (note the ensembl db is structured similarly to the transcript_cluster db)
###however, an additional call is required to match these up.
###ensembl_exon_db[(geneid,chr,strand)] = [[E5,exon_info]] #exon_info = (exon_start,exon_stop,exon_id,exon_annot)
ensembl_gene_position_pos_db, ensembl_gene_position_neg_db = getGenePositions(ensembl_exon_db,'yes')
affy_gene_position_pos_db, affy_gene_position_neg_db = getGenePositions(exon_location,'no')
if test=='yes':
for chr in affy_gene_position_pos_db:
b = affy_gene_position_pos_db[chr]
for i in b:
for e in b[i]:
for pos in e: print 'pos',chr,i,pos
for chr in affy_gene_position_neg_db:
b = affy_gene_position_neg_db[chr]
for i in b:
for e in b[i]:
for pos in e: print 'neg',chr,i,pos
###Dump memory
"""print ensembl_exon_db
print exon_location
print ensembl_gene_position_pos_db
print affy_gene_position_pos_db
killer"""
exon_location={}; global ensembl_probeset_db
print "First round (positive strand)..."
#global merged_gene_loc
merged_gene_loc={}; no_match_list=[]
merged_gene_loc,no_match_list = getChromosomalOveralap(affy_gene_position_pos_db,ensembl_gene_position_pos_db,merged_gene_loc,no_match_list)
print "Second round (negative strand)..."
merged_gene_loc,no_match_list = getChromosomalOveralap(affy_gene_position_neg_db,ensembl_gene_position_neg_db,merged_gene_loc,no_match_list)
merged_gene_loc = resolveProbesetAssociations(merged_gene_loc,no_match_list)
ensembl_probeset_db = reorderEnsemblLinkedProbesets(merged_gene_loc,transcript_cluster_data,trans_annotation_db)
if test=='yes':
for i in merged_gene_loc:
print 'merged_gene_loc ',i
for e in merged_gene_loc[i]: print e
if test=='yes':
for i in ensembl_probeset_db:
print 'ensembl_probeset_db ',i
for e in ensembl_probeset_db[i]: print e
###Dump memory
affy_gene_position_neg_db={};ensembl_gene_position_pos_db={}; no_match_list={}
    print "Beginning to assemble constitutive annotations (Based on Ensembl/FL/EST evidence)..."
transcript_cluster_data={}
print "Assigning exon-level Ensembl annotations to probesets (e.g. exon number) for", len(ensembl_probeset_db),'genes.'
ensembl_probeset_db = annotateExons(ensembl_probeset_db,exon_clusters,ensembl_exon_db,exon_region_db,intron_retention_db,intron_clusters,ucsc_splicing_annot_db)
print "Exporting Ensembl-merged probeset database to text file for", len(ensembl_probeset_db),'genes.'
exportEnsemblLinkedProbesets(arraytype, ensembl_probeset_db,species)
return ensembl_probeset_db
################# Identify overlap between Ensembl and transcript_clusters
def getGenePositions(exon_db,call):
chromosome_pos_db={}; chromosome_neg_db={}
for key in exon_db:
t=[]
strand = key[-1]; chr = key[1]; index1= 0; index2 = -1; indexa = 0; indexb = 1
if strand == '-': index1= -1; index2 = 0; indexa = 1; indexb = 0
if call == 'yes':
### For Ensembl exon coordinates - get Gene Coordinates
### Doesn't work for NKX2-5 in human for exon array - first exon spans the last exon (by AltAnalyze's definitions)
#geneStart = exon_db[key][index1][1][indexa] #first sorted exon
#geneStop = exon_db[key][index2][1][indexb]
for exon_data in exon_db[key]:
t.append(exon_data[1][0])
t.append(exon_data[1][1])
else:
### For transcript cluster data (slightly different format than above) - get Gene Coordinates
chr = chr[3:]
if '_' in chr: c = string.split(chr,'_'); chr=c[0] ###For _unknown chr from Affy's annotation file
#geneStart = exon_db[key][index1][indexa] #first sorted exon
#geneStop = exon_db[key][index2][indexb]
for exon_data in exon_db[key]:
t.append(exon_data[0])
t.append(exon_data[1])
#t=[]; t.append(geneStart); t.append(geneStop); t.sort()
t.sort(); t = [t[0],t[-1]]
if strand == '-':
if chr in chromosome_neg_db:
gene_position_db = chromosome_neg_db[chr]
gene_position_db[tuple(t)] = key,exon_db[key]
else:
gene_position_db={}
gene_position_db[tuple(t)] = key,exon_db[key]
chromosome_neg_db[chr] = gene_position_db
if strand == '+':
if chr in chromosome_pos_db:
gene_position_db = chromosome_pos_db[chr]
gene_position_db[tuple(t)] = key,exon_db[key]
else:
gene_position_db={}
gene_position_db[tuple(t)] = key,exon_db[key]
chromosome_pos_db[chr] = gene_position_db
return chromosome_pos_db,chromosome_neg_db
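### Editor's note: illustrative sketch, not part of the pipeline. Shows the
### reduction used in getGenePositions(): a gene span is simply the minimum
### start and maximum stop over all of its exon coordinates (toy values).
def _example_genePositionReduction():
    exon_coords = [(120, 180), (40, 90), (210, 260)]
    t = []
    for (start, stop) in exon_coords:
        t.append(start); t.append(stop)
    t.sort(); gene_span = (t[0], t[-1])
    assert gene_span == (40, 260)
    return gene_span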
def getChromosomalOveralap(affy_chr_db,ensembl_chr_db,ensembl_transcript_clusters,no_match_list):
"""Find transcript_clusters that have overlapping start positions with Ensembl gene start and end (based on first and last exons)"""
###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
y = 0; l =0; multiple_ensembl_associations=[]
###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)
for chr in affy_chr_db:
try:
ensembl_db = ensembl_chr_db[chr]
affy_db = affy_chr_db[chr]
for (bp1,ep1) in affy_db:
x = 0
transcript_cluster_key = affy_db[(bp1,ep1)][0]
for (bp2,ep2) in ensembl_db:
y += 1; ensembl = ensembl_db[(bp2,ep2)][0][0]
#print ensembl, transcript_cluster_key, (bp2,ep2),(bp1,ep1);kill
                    ###if the two gene location ranges overlap
###affy_probeset_info = (start,stop,exon_type,probeset_id)
if ((bp1 >= bp2) and (ep2 >= bp1)) or ((bp2 >= bp1) and (ep1 >= bp2)):
x = 1; affy_probeset_info = affy_db[(bp1,ep1)][1]; ensembl_key = ensembl_db[(bp2,ep2)][0],(bp2,ep2),affy_probeset_info
try:ensembl_transcript_clusters[transcript_cluster_key].append(ensembl_key)
except KeyError: ensembl_transcript_clusters[transcript_cluster_key] = [ensembl_key]
l += 1
if x == 0: no_match_list.append(transcript_cluster_key)
except KeyError: print chr, 'data not found'
print "Transcript Clusters overlapping with Ensembl:",len(ensembl_transcript_clusters)
    print "With NO overlap",len(no_match_list)
return ensembl_transcript_clusters,no_match_list
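### Editor's note: illustrative sketch, not part of the pipeline. The overlap
### predicate used above treats two spans as overlapping when neither lies
### entirely before the other; the coordinates are taken from the comment in
### the function body.
def _example_overlapPredicate():
    (bp1, ep1) = (47211632, 47869699) ### transcript cluster span
    (bp2, ep2) = (47216942, 47240877) ### Ensembl gene span
    overlaps = ((bp1 >= bp2) and (ep2 >= bp1)) or ((bp2 >= bp1) and (ep1 >= bp2))
    assert overlaps ### the Ensembl gene span falls inside the cluster span
    return overlaps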
def resolveProbesetAssociations(ensembl_transcript_clusters,no_match_list):
ensembl_transcript_clusters2={}; x = 0
for transcript_cluster_key in ensembl_transcript_clusters:
ensembl_groups = ensembl_transcript_clusters[transcript_cluster_key]
probeset_data_list = ensembl_groups[0][2]
if len(ensembl_groups)>1:
            ###although the affy_transcript_cluster may overlap, each probe does not
###Therefore associate the appropriate probesets with the appropriate ensembl ids
x += 1
for probeset_data in probeset_data_list:
(bp1,ep1,exon_type,probeset_id) = probeset_data
y = 0
for data in ensembl_groups:
ensembl_id = data[0]
(bp2,ep2) = data[1]
if ((bp1 >= bp2) and (ep2 >= bp1)) or ((bp2 >= bp1) and (ep1 >= bp2)):
y = 1
###Reduce data to Ensembl gene level (forget transcript_cluster_ids)
try:
ensembl_transcript_clusters2[ensembl_id].append(probeset_data)
except KeyError:
ensembl_transcript_clusters2[ensembl_id] = [probeset_data]
else:
for data in ensembl_groups:
ensembl_id = data[0]
probeset_data = data[2]
try:
ensembl_transcript_clusters2[ensembl_id].append(probeset_data)
except KeyError:
ensembl_transcript_clusters2[ensembl_id] = [probeset_data]
print "Unique Ensembl genes linked to one or more transcript clusters:",len(ensembl_transcript_clusters2)
print "Ensembl genes linked to more than one transcript cluster:",x
print "Transcript clusters with NO links to Ensembl", len(no_match_list)
    print "\nNOTE: if multiple clusters are linked to an Ensembl gene, exons outside the Ensembl overlap will be discarded\n"
return ensembl_transcript_clusters2
################# Annotate Exons based on location
def convertListString(list,delimiter):
list2 = []; list = unique.unique(list); list.sort()
for i in list:
if len(str(i))>0: list2.append(str(i))
str_values = string.join(list2,delimiter)
return str_values
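### Editor's note: illustrative sketch, not part of the pipeline. Shows the
### de-duplicate/sort/join behaviour of convertListString() on toy values;
### empty strings are skipped and all values are stringified.
def _example_convertListString():
    joined = convertListString([2, 1, 2, '', 'E1'], '|')
    return joined ### e.g. '1|2|E1'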
def annotateExons(exon_location,exon_clusters,ensembl_exon_db,exon_region_db,intron_retention_db,intron_clusters,ucsc_splicing_annot_db):
    ###Annotate Affymetrix probesets independently of other annotations. A problem with this method
    ###is that it fails to recognize distinct probesets that probe different regions of the same exon.
###(start,stop,probeset_id,exon_class) = probeset_data
###exon_clusters is from EnsemblImport: exon_data = exon,(start,stop),[accessions],[types]
###Reverse exon entries based on strand orientation
for key in exon_location:
exon_location[key].sort(); strand = key[-1]
if strand == "-": exon_location[key].reverse()
#alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','x','y','z']
probeset_aligments={}; exon_location2={}
x = 0; p = 0; count = 0
for key in exon_location:
old_index = 0; index2 = 1; index3 = 0; exon_list=[]; strand = key[-1]; last_added = 'null'
#if key[-1] == '-':
gene = key[0]
new_key = (key[0],key[1][3:],key[2])
for exon in exon_location[key]: ###probeset location database
start = int(exon[0]); stop = int(exon[1]); probeset = exon[2]; probeset_region_exon_db={}
#if count < 20: print probeset
#else: die
ens_exonid_values=''; ens_constitutive=''; splice_event_value=''; splice_junction_values=''; region_value=''
y = 0; u = 0; junction = 'no'; new_exon_data = ''; exon_start='';exon_stop=''
count+=1 ###Create the lists here so we can store data for multiple exon hits, if they occur (not good if they do)
temp_probeset_exon_id=[]; temp_probeset_annot=[]; region_ids=[]; splice_events=[]; splice_junctions=[]; reg_start=[];reg_stop=[]
#print probeset,start,stop;kill
if new_key in intron_retention_db:
for exon_data in intron_retention_db[new_key]:
t_start = exon_data[0]; t_stop = exon_data[1]; ed = exon_data[2]
if ((start >= t_start) and (stop <= t_stop)): splice_events.append('intron-retention')
###Match up probesets specifically with UCSC exon annotations (need to do it here to precisely align probesets)
if gene in ucsc_splicing_annot_db:
ucsc_events = ucsc_splicing_annot_db[gene]
for (r_start,r_stop,splice_event) in ucsc_events:
if ((start >= r_start) and (start < r_stop)) or ((stop > r_start) and (stop <= r_stop)): splice_events.append(splice_event)
elif ((r_start >= start) and (r_start <= stop)) or ((r_stop >= start) and (r_stop <= stop)): splice_events.append(splice_event)
for exon_values in exon_clusters[key]: ###Ensembl location database
ref_start = exon_values[1][0]; ref_stop = exon_values[1][1]; ens_exon_ida = exon_values[2][0]
"""if probeset == 'G6857086:E15' and ens_exon_ida == 'ENSMUSE00000101953':
print start,ref_start,stop,ref_stop,exon_values[2]
if ((start >= ref_start) and (stop <= ref_stop)) or ((start >= ref_start) and (start <= ref_stop)) or ((stop >= ref_start) and (stop <= ref_stop)): print 'good'
else: print 'bad'
kill"""
if ((start >= ref_start) and (stop <= ref_stop)) or ((start >= ref_start) and (start < ref_stop)) or ((stop > ref_start) and (stop <= ref_stop)):
new_index = exon_values[0]
#probeset_exon_list.append((new_index,key,start,stop,ref_start,ref_stop))
###Grab individual exon annotations
#temp_probeset_exon_id=[]; temp_probeset_annot=[]; region_ids=[]; splice_events=[]; splice_junctions=[]
for exon_data in ensembl_exon_db[new_key]:
t_start = exon_data[1][0]; t_stop = exon_data[1][1]; ed = exon_data[1][2]
if ((start >= t_start) and (start < t_stop)) or ((stop > t_start) and (stop <= t_stop)):
if ((start >= t_start) and (start < t_stop)) and ((stop > t_start) and (stop <= t_stop)):
### Thus the probeset is completely in this exon
try: probeset_aligments[probeset].append([t_start,t_stop])
except KeyError: probeset_aligments[probeset]=[[t_start,t_stop]]
"""if probeset == 'G6857086:E15' and ens_exon_ida == 'ENSMUSE00000101953':
print probeset, start,t_start,stop,t_stop,exon_values[2], 'goood', ed.ExonID()"""
temp_probeset_exon_id.append(ed.ExonID())
block_db = exon_region_db[gene]
###Annotate exon regions
for rd in block_db[new_index]:
if strand == '-': r_stop = rd.ExonStart(); r_start = rd.ExonStop()
else: r_start = rd.ExonStart(); r_stop = rd.ExonStop()
if ((start >= r_start) and (start < r_stop)) or ((stop > r_start) and (stop <= r_stop)):
reg_start.append(r_start); reg_stop.append(r_stop)
region_ids.append(rd.ExonRegionID()) ###only one region per probeset unless overlapping with two
try: splice_events.append(rd.AssociatedSplicingEvent());splice_junctions.append(rd.AssociatedSplicingJunctions())
except AttributeError: splice_events = splice_events ###occurs when the associated exon is not a critical exon
temp_probeset_annot.append(rd.Constitutive())
"""region_ids = unique.unique(region_ids)
if len(region_ids)>1:
print key, region_ids
for rd in block_db[new_index]: print start, rd.ExonStart(), stop, rd.ExonStop(), probeset, new_index,rd.RegionNumber();die"""
#elif ((start >= t_start) and (start <= t_stop)): ###Thus only the start is inside an exon
#if strand == '+': print probeset,key, ed.ExonID(), start, stop, t_start,t_stop;kill
if len(temp_probeset_exon_id)>0:
ens_exonid_values = convertListString(temp_probeset_exon_id,'|')
if 'yes' in temp_probeset_annot: ens_constitutive = 'yes'
elif '1' in temp_probeset_annot: ens_constitutive = '1'
else: ens_constitutive = convertListString(temp_probeset_annot,'|')
region_value = convertListString(region_ids,'|'); splice_event_value = convertListString(splice_events,'|')
exon_start = convertListString(reg_start,'|'); exon_stop = convertListString(reg_stop,'|')
splice_junction_values = convertListString(splice_junctions,'|');u = 1
splice_event_value = string.replace(splice_event_value,'intron-retention','exon-region-exclusion')
ned = EnsemblImport.ProbesetAnnotation(ens_exonid_values, ens_constitutive, region_value, splice_event_value, splice_junction_values,exon_start,exon_stop)
new_exon_data = exon,ned
#print key; exon; exon_values[2],exon_values[3],dog
if new_index == old_index:
exon_info = 'E'+str(old_index)+'-'+str(index2); index_val = 'old'
y = 1; index2 += 1; last_added = 'exon' #;last_start = start; last_stop = stop
if exon_values == exon_clusters[key][-1]: final_exon_included = 'yes'
else: final_exon_included = 'no'
#print exon_clusters[key][-1], exon_values, old_index, exon_info, probeset,final_exon_included;kill
else:
index2 = 1
exon_info = 'E'+str(new_index)+'-'+str(index2); index_val = old_index
old_index = new_index
y = 1; index2 += 1; last_added = 'exon' #;last_start = start; last_stop = stop
if exon_values == exon_clusters[key][-1]: final_exon_included = 'yes'
else: final_exon_included = 'no'
#print 'blah',new_index,old_index
"""if len(exon)>7: ###Therefore, this probeset associated with distinct exon clusters (bad design)
index2 = (index2 - 1); x += 1"""
if u != 1: ###Therefore, there were specific exon associations available
ens_exonid_values = convertListString(exon_values[2],'|');
if 'yes' in exon_values[3]: ens_constitutive = 'yes'
elif '1' in exon_values[3]: ens_constitutive = '1'
else: ens_constitutive = convertListString(exon_values[3],'|')
ned = EnsemblImport.ProbesetAnnotation(ens_exonid_values, ens_constitutive, region_value, splice_event_value, splice_junction_values,exon_start,exon_stop)
new_exon_data = exon,ned
"""if len(exon)>7: ###Therefore, this probeset associated with distinct exon clusters (bad design)
index2 = (index2 - 1); x += 1"""
continue
#if len(probeset_exon_list)>1: print probeset,probeset_exon_list;kill
#if probeset == '2948539': print y,last_added,final_exon_included,old_index,new_index,index2;kill
if y == 1:
"""if len(exon)>7:
exon = exon[0:5]
if index_val != 'old':
old_index = index_val; y=0; junction = 'yes'
else:"""
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
if y == 0: ###Thus this probeset is not in any ensembl exons
###Check if the first probesets are actually in an intron
#if probeset == '2948539': print probeset,'a',last_added,old_index,new_index,index2
if key in intron_clusters:
for intron_values in intron_clusters[key]: ###Ensembl location database
ref_start = intron_values[1][0]; ref_stop = intron_values[1][1]
if ((start >= ref_start) and (stop <= ref_stop)) or ((start >= ref_start) and (start <= ref_stop)) or ((stop >= ref_start) and (stop <= ref_stop)):
old_index = intron_values[0]
if last_added == 'intron': last_added = 'intron'
elif last_added == 'null': last_added = 'exon' ###utr is the default assigned at the top
final_exon_included = 'no'
#if probeset == '2948539': print probeset,'b',last_added,old_index,new_index,index2
temp_probeset_exon_id=[]; temp_probeset_annot=[]; intron_retention_found = 'no'
if new_key in intron_retention_db:
for exon_data in intron_retention_db[new_key]:
t_start = exon_data[0]; t_stop = exon_data[1]; ed = exon_data[2]
if ((start >= t_start) and (stop <= t_stop)):
temp_probeset_exon_id.append(ed.ExonID()); temp_probeset_annot.append(ed.Constitutive())
if len(temp_probeset_exon_id)>0:
temp_probeset_exon_id = unique.unique(temp_probeset_exon_id); temp_probeset_annot = unique.unique(temp_probeset_annot)
ens_exonid_values = convertListString(temp_probeset_exon_id,'|'); ens_constitutive = convertListString(temp_probeset_annot,'|')
#ned = EnsemblImport.ProbesetAnnotation(ens_exonid_values, ens_constitutive, 0, 'intron-retention', '')
splice_event_value = 'intron-retention'
#new_exon_data = exon,ned; intron_retention_found = 'yes'
#if probeset == '2948539': print probeset,'c',last_added,old_index,index2
ned = EnsemblImport.ProbesetAnnotation(ens_exonid_values, ens_constitutive, region_value, splice_event_value, splice_junction_values,exon_start,exon_stop)
new_exon_data = exon,ned
if old_index == 0: ###Means exons that are 5' to the ensembl reference exons and not in an intron
#if probeset == '2948539': print probeset,'dU',last_added,old_index,new_index,index2
exon_info = 'U'+str(old_index)+'-'+str(index2) ###index2 will be 0 the first time by default
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
last_added = 'utr'; index2 += 1
#if probeset == '2948539':print probeset,exon_info, 'xl'
else:
if last_added == 'exon':
#if probeset == '2948539': print probeset,'eIU',last_added,old_index,new_index,index2
if final_exon_included == 'no':
index2 = 1
exon_info = 'I'+str(old_index)+'-'+str(index2)
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
last_added = 'intron'; index2 += 1
#if probeset == '2948539':print probeset,exon_info
else:
index2 = 1
exon_info = 'U'+str(old_index)+'-'+str(index2)
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
last_added = 'utr'; index2 += 1
#if probeset == '2948539':print probeset,exon_info
elif last_added == 'intron':
#if probeset == '2948539': print probeset,'fI',last_added,old_index,new_index,index2
exon_info = 'I'+str(old_index)+'-'+str(index2)
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
last_added = 'intron'; index2 += 1
#if probeset == '2948539':print probeset,exon_info
elif last_added == 'utr':
### Since the probeset is not in an exon and the last added was a UTR
### could be in the 3'UTR (if in the 5'UTR it would have an old_index = 0) or
### could be an intron, if no probesets aligned to the first exon
if final_exon_included == 'no': # alternative: old_index == 1: ###thus it is really in the first intron: example is 2948539
exon_info = 'I'+str(old_index)+'-'+str(index2)
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
last_added = 'intron'; index2 += 1
else:
exon_info = 'U'+str(old_index)+'-'+str(index2)
exon_info = exon_info,new_exon_data; exon_list.append(exon_info)
last_added = 'utr'; index2 += 1
#if probeset == '2948539':print probeset,exon_info;kill
if len(exon_list)>0: exon_location2[key] = exon_list
#kill
#print x,p
#exportProbesetAlignments(probeset_aligments)
for key in exon_location2:
if key[0] == 'ENSG00000138231':
print key
for item in exon_location2[key]: print item
for key in exon_location2:
if key[0] == 'ENSG00000095794':
print key
for item in exon_location2[key]: print item
return exon_location2
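### Editor's note: illustrative sketch, not part of the pipeline. annotateExons()
### names each probeset feature with a prefix (E=exon, I=intron, U=UTR), the
### Ensembl exon-cluster index and a running sub-index, as reconstructed below.
def _example_featureNaming():
    old_index = 5; index2 = 2
    exon_info = 'E'+str(old_index)+'-'+str(index2) ### 2nd probeset in exon cluster 5
    intron_info = 'I'+str(old_index)+'-1' ### 1st probeset in the following intron
    utr_info = 'U0-1' ### probeset 5' of all reference exons (old_index of 0)
    return exon_info, intron_info, utr_info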
def reorderEnsemblLinkedProbesets(ensembl_transcript_clusters,transcript_cluster_data,trans_annotation_db):
print len(trans_annotation_db), 'entries in old trans_annotation_db'
ensembl_probeset_db={}; probeset_gene_redundancy={}; gene_transcript_redundancy={}; y=0; x=0; n=0; l=0; k=0
for key in ensembl_transcript_clusters:
geneid = key[0]; chr = 'chr'+ key[1]; strand = key[2]
for entry in ensembl_transcript_clusters[key]:
            ###The structure of the entries varies depending on whether multiple Ensembl genes were associated with a single transcript_cluster_id
try: entry[0][0]
except TypeError: entry = [entry]
for probeset_info in entry:
###Rebuild this database since structural inconsistencies exist
start = str(probeset_info[0]); stop = str(probeset_info[1]); exon_class = probeset_info[2]; probeset_id = probeset_info[3]
transcript_cluster_id = transcript_cluster_data[probeset_id][-1]
probeset_data = [start,stop,probeset_id,exon_class,transcript_cluster_id]; y += 1
try: ensembl_probeset_db[geneid,chr,strand].append(probeset_data)
except KeyError: ensembl_probeset_db[geneid,chr,strand]=[probeset_data]
try: probeset_gene_redundancy[probeset_id,start,stop].append((geneid,chr,strand))
except KeyError: probeset_gene_redundancy[probeset_id,start,stop] = [(geneid,chr,strand)]
try: gene_transcript_redundancy[geneid].append(transcript_cluster_id)
except KeyError: gene_transcript_redundancy[geneid] = [transcript_cluster_id]
###Correct incorrect Ensembl assignments based on trans-splicing etc.
probeset_gene_redundancy = eliminateRedundant(probeset_gene_redundancy)
gene_transcript_redundancy = eliminateRedundant(gene_transcript_redundancy)
print len(ensembl_probeset_db), 'entries in old ensembl_probeset_db'
print len(gene_transcript_redundancy), 'entries in old gene_transcript_redundancy'
### Added this new code to determine which transcript clusters uniquely detect a single gene (exon-level)
### Note: this is a potentially lengthy step (added in version 2.0.5)
valid_gene_to_cluster_annotations={}; gene_transcript_redundancy2={}
for probeset_info in probeset_gene_redundancy:
probeset = probeset_info[0];start = int(probeset_info[1]); stop = int(probeset_info[2])
transcript_cluster_id = transcript_cluster_data[probeset][-1]
for ensembl_group in probeset_gene_redundancy[probeset_info]:
pos_ens,neg_ens = alignProbesetsToEnsembl([],[],start,stop,ensembl_group) ### Indicates that at least one probeset in the TC aligns certain Ensembl genes
ens_gene = ensembl_group[0]
if len(pos_ens)>0:
try: valid_gene_to_cluster_annotations[transcript_cluster_id].append(ens_gene)
except Exception: valid_gene_to_cluster_annotations[transcript_cluster_id] = [ens_gene]
valid_gene_to_cluster_annotations = eliminateRedundant(valid_gene_to_cluster_annotations)
    print len(valid_gene_to_cluster_annotations), 'Valid gene-to-transcript cluster assignments based on genomic position'
### Remove probeset-gene and transcript_cluster-gene associations not supported by exon evidence
for tc in valid_gene_to_cluster_annotations:
for gene in valid_gene_to_cluster_annotations[tc]:
try: gene_transcript_redundancy2[gene].append(tc)
except Exception: gene_transcript_redundancy2[gene] = [tc]
del_probesets = {}
for probeset_info in probeset_gene_redundancy:
probeset = probeset_info[0]
transcript_cluster_id = transcript_cluster_data[probeset][-1]
if transcript_cluster_id in valid_gene_to_cluster_annotations: ### If not, don't change the existing relationships
keep=[]
for ensembl_group in probeset_gene_redundancy[probeset_info]:
ens_gene = ensembl_group[0]
if ens_gene in valid_gene_to_cluster_annotations[transcript_cluster_id]: keep.append(ensembl_group)
probeset_gene_redundancy[probeset_info] = keep ### Replace the existing with this filtered list
else: del_probesets[probeset_info] = []
for pi in del_probesets: del probeset_gene_redundancy[pi]
trans_annotation_db = valid_gene_to_cluster_annotations
gene_transcript_redundancy = gene_transcript_redundancy2
print len(trans_annotation_db), 'entries in new trans_annotation_db'
print len(gene_transcript_redundancy), 'entries in new gene_transcript_redundancy'
print len(probeset_gene_redundancy), 'entries in probeset_gene_redundancy'
ensembl_probeset_db2 = {}
for (geneid,chr,strand) in ensembl_probeset_db:
for probeset_data in ensembl_probeset_db[(geneid,chr,strand)]:
start,stop,probeset_id,exon_class,transcript_cluster_id = probeset_data
try:
if (geneid,chr,strand) in probeset_gene_redundancy[probeset_id,start,stop]:
try: ensembl_probeset_db2[(geneid,chr,strand)].append(probeset_data)
except Exception: ensembl_probeset_db2[(geneid,chr,strand)] = [probeset_data]
except KeyError: null=[]
ensembl_probeset_db = ensembl_probeset_db2
print len(ensembl_probeset_db), 'entries in new ensembl_probeset_db'
###First check for many transcript IDs associating with one Ensembl
remove_transcripts_clusters={}
for geneid in gene_transcript_redundancy:
transcript_cluster_id_list = gene_transcript_redundancy[geneid]
if len(transcript_cluster_id_list)>1:
for transcript_cluster_id in transcript_cluster_id_list:
if transcript_cluster_id in trans_annotation_db: ###Check the Affymetrix transcript-Ensembl associations
ensembl_list = trans_annotation_db[transcript_cluster_id]
#[] ['3890870', '3890907', '3890909', '3890911'] ENSG00000124209
if transcript_cluster_id in test_cluster: print ensembl_list,transcript_cluster_id_list,geneid
if geneid not in ensembl_list:
try: remove_transcripts_clusters[transcript_cluster_id].append(geneid)
except Exception: remove_transcripts_clusters[transcript_cluster_id]=[geneid]
"""
###Perform a multi-step more refined search and remove transcript_clusters identified from above, that have no annotated Ensembl gene
remove_probesets={}
for probeset_info in probeset_gene_redundancy:
probeset = probeset_info[0]
start = int(probeset_info[1]); stop = int(probeset_info[2])
transcript_cluster_id = transcript_cluster_data[probeset][-1]
###First check to see if any probeset in the transcript_cluster_id should be eliminated
if transcript_cluster_id in remove_transcripts_clusters:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
if len(probeset_gene_redundancy[probeset_info])>1:
x += 1
###Grab the redundant Ensembl list for each probeset
ensembl_list1 = probeset_gene_redundancy[probeset_info]
###Grab the Ensembl list aligning to the transcript_cluster annotations for that probeset
try:ensembl_list2 = trans_annotation_db[transcript_cluster_id]
except KeyError: ensembl_list2=[]
pos_ens=[]; neg_ens=[]
for ensembl_group in ensembl_list1:
ensembl = ensembl_group[0]
if ensembl in ensembl_list2: pos_ens.append(ensembl_group)
else: neg_ens.append(ensembl_group)
if len(pos_ens) == 1:
n += 1
for ensembl_group in neg_ens:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
else: ###These are probesets for where the code did not identify a 'best' ensembl
###get probeset location and check against each exon in the exon cluster list
for ensembl_group in ensembl_list1: ###exon_clusters is from EnsemblImport: exon_data = exon,(start,stop),[accessions],[types]
if ensembl_group in exon_clusters:
for exon_data in exon_clusters[ensembl_group]:
exon_start = exon_data[1][0]
exon_stop = exon_data[1][1]
if (start >= exon_start) and (stop <= exon_stop):
pos_ens.append(ensembl_group)
if len(pos_ens) == 0:
neg_ens.append(ensembl_group)
pos_ens = unique.unique(pos_ens); neg_ens = unique.unique(neg_ens)
if len(pos_ens) == 1:
l += 1
for ensembl_group in neg_ens:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
            else: ###If there is no method for differentiating, remove the probeset from the database
k += 1
for ensembl_group in ensembl_list1:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
"""
remove_probesets={}
for probeset_info in probeset_gene_redundancy:
probeset = probeset_info[0]
start = int(probeset_info[1]); stop = int(probeset_info[2])
transcript_cluster_id = transcript_cluster_data[probeset][-1]
###Grab the redundant Ensembl list for each probeset
ensembl_list1 = probeset_gene_redundancy[probeset_info]
###First check to see if any probeset in the transcript_cluster_id should be eliminated
if transcript_cluster_id in remove_transcripts_clusters:
remove_genes = remove_transcripts_clusters[transcript_cluster_id] ### Why is this here
pos_ens=[]; neg_ens=[]
            for ensembl_group in ensembl_list1: ###Ensembl group corresponds to the exon_cluster dictionary key
pos_ens,neg_ens = alignProbesetsToEnsembl(pos_ens,neg_ens,start,stop,ensembl_group)
pos_ens = makeUnique(pos_ens); neg_ens = makeUnique(neg_ens)
if len(pos_ens)!=1:
###if there are no probesets aligning to exons or probesets aligning to exons in multiple genes, remove these
for ensembl_group in pos_ens:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
for ensembl_group in neg_ens:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
else:
###If a probeset is in an Ensembl exon (pos_ens), remove associations for probesets to ensembl IDs where the probeset is not in an exon for that gene
for ensembl_group in neg_ens:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
elif len(probeset_gene_redundancy[probeset_info])>1:
x += 1
###Grab the Ensembl list aligning to the transcript_cluster annotations for that probeset
try: ensembl_list2 = trans_annotation_db[transcript_cluster_id]
except KeyError: ensembl_list2=[]
pos_ens1=[]; neg_ens1=[]
for ensembl_group in ensembl_list1:
ensembl = ensembl_group[0]
if ensembl in ensembl_list2: pos_ens1.append(ensembl_group)
else: neg_ens1.append(ensembl_group)
pos_ens=[]; neg_ens=[]
###get probeset location and check against each exon in the exon cluster list
for ensembl_group in ensembl_list1: ###exon_clusters is from EnsemblImport: exon_data = exon,(start,stop),[accessions],[types]
exon_found = 0
if ensembl_group in exon_clusters:
for exon_data in exon_clusters[ensembl_group]:
exon_start = exon_data[1][0]
exon_stop = exon_data[1][1]
if (start >= exon_start) and (stop <= exon_stop):
pos_ens.append(ensembl_group); exon_found = 1
if exon_found == 0:
neg_ens.append(ensembl_group)
pos_ens = unique.unique(pos_ens); neg_ens = unique.unique(neg_ens)
#if probeset == 3161639: print 'b',pos_ens, transcript_cluster_id, neg_ens;sys.exit()
if len(pos_ens) == 1:
l += 1
for ensembl_group in neg_ens:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
elif len(pos_ens1) == 1:
n += 1
for ensembl_group in neg_ens1:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
            else: ###If there is no method for differentiating, remove the probeset from the database
k += 1
for ensembl_group in ensembl_list1:
try: remove_probesets[ensembl_group].append(probeset)
except KeyError: remove_probesets[ensembl_group] = [probeset]
#if probeset == '3890871': print ensembl_group,ensembl_list1, probeset;kill
ensembl_probeset_db = removeRedundantProbesets(ensembl_probeset_db,remove_probesets)
print "Number of Probesets linked to Ensembl entries:", y
    print "Number of Probesets occurring in multiple Ensembl genes:",x
print " removed by transcript_cluster editing:",n
print " removed by exon matching:",l
print " automatically removed (not associating with a clear single gene):",k
print " remaining redundant:",(x-(n+l+k))
return ensembl_probeset_db
def alignProbesetsToEnsembl(pos_ens,neg_ens,start,stop,ensembl_group):
try:
k=0
for exon_data in exon_clusters[ensembl_group]:
exon_start = exon_data[1][0];exon_stop = exon_data[1][1]
if (start >= exon_start) and (stop <= exon_stop): pos_ens.append(ensembl_group); k=1
if k==0: neg_ens.append(ensembl_group)
return pos_ens,neg_ens
except KeyError: return pos_ens,neg_ens
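### Editor's note: illustrative sketch, not part of the pipeline. Unlike the
### span-overlap test used for gene matching, alignProbesetsToEnsembl() demands
### full containment of the probeset within an Ensembl exon (toy coordinates).
def _example_exonContainment():
    start, stop = 150, 175 ### probeset coordinates
    exon_start, exon_stop = 100, 200 ### exon-cluster coordinates
    contained = (start >= exon_start) and (stop <= exon_stop)
    assert contained
    return contained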
def removeRedundantProbesets(ensembl_probeset_db,remove_probesets):
    ###Remove duplicate entries and record which probesets associate with multiple Ensembl genes (remove these genes)
#check_for_promiscuous_transcripts={}
for ensembl_group in remove_probesets:
new_probe_list=[]
for probeset_data in ensembl_probeset_db[ensembl_group]:
probeset = probeset_data[2]
#transcript_cluster_id = transcript_cluster_data[probeset][-1]
if probeset not in remove_probesets[ensembl_group]:
new_probe_list.append(probeset_data)
#try: check_for_promiscuous_transcripts[transcript_cluster_id].append(ensembl_group)
#except KeyError: check_for_promiscuous_transcripts[transcript_cluster_id] = [ensembl_group]
ensembl_probeset_db[ensembl_group] = new_probe_list
    ###Final Sanity check: see if probesets could have been added to more than one Ensembl gene
probeset_ensembl_db={}
for ensembl_group in ensembl_probeset_db:
for probeset_data in ensembl_probeset_db[ensembl_group]:
probeset = probeset_data[2]
try: probeset_ensembl_db[probeset].append(ensembl_group)
except KeyError: probeset_ensembl_db[probeset] = [ensembl_group]
for probeset in probeset_ensembl_db:
if len(probeset_ensembl_db[probeset])>1: print probeset, probeset_ensembl_db[probeset]; kill
"""
###Remove Ensembl gene entries any transcript_cluster_id linking to multiple Ensembl genes
    ###This can occur if one set of probesets links to one Ensembl gene and another set (belonging to the same transcript cluster ID) links to another
ens_deleted = 0
for transcript_cluster_id in check_for_promiscuous_transcripts:
if len(check_for_promiscuous_transcripts[transcript_cluster_id])>1: ###Therefore, more than one ensembl gene is associated with that probeset
for ensembl_group in check_for_promiscuous_transcripts[transcript_cluster_id]:
###remove these entries from the above ensembl probeset database
try: del ensembl_probeset_db[ensembl_group]; ens_deleted += 1
except KeyError: null = ''
print ens_deleted,"ensembl gene entries deleted, for linking to probesets with multiple gene associations after all other filtering"
"""
return ensembl_probeset_db
################# Select files for analysis and output results
def exportProbesetAlignments(probeset_aligments):
    ###These are probeset-transcript annotations directly from Affymetrix, not aligned
probeset_annotation_export = 'AltDatabase/' + species + '/'+arraytype+'/'+ species + '_probeset-exon-align-coord.txt'
probeset_aligments = eliminateRedundant(probeset_aligments)
fn=filepath(probeset_annotation_export); data = open(fn,'w')
title = 'probeset_id'+'\t'+'genomic-start'+'\t'+'genomic-stop'+'\n'
data.write(title)
for probeset_id in probeset_aligments:
for coordinates in probeset_aligments[probeset_id]:
values = str(probeset_id) +'\t'+ str(coordinates[0]) +'\t'+ str(coordinates[1]) +'\n'
data.write(values)
data.close()
def exportEnsemblLinkedProbesets(arraytype, ensembl_probeset_db,species):
exon_annotation_export = 'AltDatabase/'+species+'/'+arraytype+'/'+species + '_Ensembl_probesets.txt'
subgeneviewer_export = 'AltDatabase/ensembl/'+species+'/'+species+'_SubGeneViewer_feature-data.txt'
fn=filepath(exon_annotation_export); data = open(fn,'w')
fn2=filepath(subgeneviewer_export); data2 = open(fn2,'w')
title = ['probeset_id','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
title +=['affy_class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
title2 =['probeset','gene-id','feature-id','region-id']
title = string.join(title,'\t') + '\n'; title2 = string.join(title2,'\t') + '\n'
data.write(title); data2.write(title2); y=0
print 'len(ensembl_probeset_db)',len(ensembl_probeset_db)
for key in ensembl_probeset_db:
geneid = key[0]; chr = key[1]; strand = key[2]
for probeset_data in ensembl_probeset_db[key]:
exon_id,((start,stop,probeset_id,exon_class,transcript_clust),ed) = probeset_data
try: constitutive = ed.ConstitutiveCall()
except Exception: constitutive = 'no'
if len(constitutive) == 0: constitutive = 'no'
start = str(start); stop = str(stop)
y+=1; ens_exon_list = ed.ExonID(); ens_annot_list = ed.Constitutive()
if len(ed.AssociatedSplicingEvent())>0: constitutive = 'no'; ens_annot_list = '0' ### Re-set these if a splicing-event is associated
values = [str(probeset_id),exon_id,geneid,str(transcript_clust),chr,strand,start,stop,exon_class,constitutive,ens_exon_list,ens_annot_list]
values+= [str(ed.RegionNumber()),ed.ExonStart(),ed.ExonStop(),ed.AssociatedSplicingEvent(),ed.AssociatedSplicingJunctions()]
region_num = ed.RegionNumber()
if len(region_num)<1: b,r = string.split(exon_id,'-'); region_num = b+'-1'
exon_id = string.replace(exon_id,'-','.'); region_num = string.replace(region_num,'-','.')
try: values = string.join(values,'\t')+'\n';
except TypeError:
print exon_id
print [probeset_id,exon_id,geneid,transcript_clust,chr,strand,start,stop,exon_class,constitutive,ens_exon_list,ens_annot_list]
print [ed.RegionNumber(),ed.AssociatedSplicingEvent(),ed.AssociatedSplicingJunctions()];kill
data.write(values)
exon_regions = string.split(region_num,'|')
for region in exon_regions:
try:
if filter_sgv_output == 'yes': ### Can filter out probeset information for all probesets not linked to a probable splice event
if len(ed.AssociatedSplicingEvent())>1 or len(ens_exon_list)>1: proceed = 'yes'
else: proceed = 'no'
else: proceed = 'yes'
except NameError: proceed = 'yes'
if proceed == 'yes':
#print filter_sgv_output, len(ed.AssociatedSplicingEvent()),len(ens_exon_list),[ed.AssociatedSplicingEvent()],[ens_exon_list],exon_id,region
values2 =[str(probeset_id),geneid,exon_id,region]
values2 = string.join(values2,'\t')+'\n'
data2.write(values2)
data.close()
print y, "Probesets linked to Ensembl entries exported to text file:",exon_annotation_export
def eliminateRedundant(database):
for key in database:
try: list = makeUnique(database[key])
except TypeError: list = unique.unique(database[key])
list.sort()
database[key] = list
return database
def makeUnique(item):
db1={}; list1=[]
for i in item: db1[i]=[]
for i in db1: list1.append(i)
list1.sort()
return list1
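### Editor's note: illustrative sketch, not part of the pipeline. Shows how
### eliminateRedundant() collapses duplicate values per key via makeUnique().
def _example_eliminateRedundant():
    db = {'ENSG00000138231': ['2315101', '2315101', '2315102']}
    db = eliminateRedundant(db)
    assert db['ENSG00000138231'] == ['2315101', '2315102']
    return db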
def testAffyAnnotationDownload(Species,array_type):
global species; species = Species; global arraytype; arraytype = array_type
checkDirectoryFiles()
def checkDirectoryFiles():
""" Check to see if the probeset annotation file is present and otherwise download AltAnalyze hosted version"""
dir = '/AltDatabase/'+species+'/'+arraytype
probeset_annotation_file = getDirectoryFiles(dir)
if probeset_annotation_file == None:
filename = update.getFileLocations(species,arraytype)
filename = dir[1:]+'/'+ filename
update.downloadCurrentVersion(filename,arraytype,'csv')
probeset_annotation_file = getDirectoryFiles(dir)
if probeset_annotation_file == None: print 'No Affymetrix annotation file present for:', arraytype, species; sys.exit()
else: print "Affymetrix annotation file found for", arraytype, species
return filepath(probeset_annotation_file)
def getDirectoryFiles(dir):
try: dir_list = read_directory(dir) #send a sub_directory to a function to identify all files in a directory
except Exception:
export.createDirPath(filepath(dir[1:])) ### This directory needs to be created
dir_list = read_directory(dir)
probeset_annotation_file = None
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = dir[1:]+'/'+data
if '.transcript.' in affy_data_dir: transcript_annotation_file = affy_data_dir
elif '.annoS' in affy_data_dir: probeset_transcript_file = affy_data_dir
elif '.probeset' in affy_data_dir and '.csv' in affy_data_dir:
if '.zip' not in affy_data_dir: probeset_annotation_file = affy_data_dir ###This file lets you grab the same info as in probeset_transcript_file, but along with mRNA associations
return probeset_annotation_file
def reimportEnsemblProbesets(filename,probe_db=None,cs_db=None):
fn=filepath(filename); x = 0
#print [fn]
if probe_db != None:
probe_association_db=probe_db; constitutive_db=cs_db; constitutive_original_db={} ### grab these from a separate file
else:
probe_association_db={}; constitutive_db={}; constitutive_original_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0:
x=1;continue
else:
#t = string.split(line,'\t')
#probeset_id=t[0];ensembl_gene_id=t[1];chr=t[2];strand=t[3];start=t[4];stop=t[5];exon_class=t[6]
try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_probeset, ens_exon_ids, exon_annotations,regionid,r_start,r_stop,splice_event,splice_junctions = string.split(data,'\t')
except Exception: print data;kill
probe_data = ensembl_gene_id,transcript_cluster_id,exon_id,ens_exon_ids,affy_class#,exon_annotations,constitutive_probeset
proceed = 'yes'
"""
if 'RNASeq' in filename:
### Restrict the analysis to exon RPKM or count data for constitutive calculation
if 'exon' in biotypes:
if '-' in probeset_id: proceed = 'no'
"""
if proceed == 'yes':
probe_association_db[probeset_id] = probe_data
if constitutive_probeset == 'yes':
try: constitutive_db[ensembl_gene_id].append(probeset_id)
except KeyError: constitutive_db[ensembl_gene_id] = [probeset_id]
else: ### There was a bug here that resulted in no entries added (AltAnalyze version 1.15) --- because constitutive selection options have changed, should not have been an issue
try: constitutive_original_db[ensembl_gene_id].append(probeset_id)
except KeyError: constitutive_original_db[ensembl_gene_id] = [probeset_id]
x+=1
###If no constitutive probesets for a gene as a result of additional filtering (removing all probesets associated with a splice event), add these back
for gene in constitutive_original_db:
if gene not in constitutive_db: constitutive_db[gene] = constitutive_original_db[gene]
constitutive_original_db={}
if 'RNASeq' in filename: id_name = 'junction IDs'
else: id_name = 'array IDs'
print len(constitutive_db), "constitutive genes and", len(probe_association_db), id_name, "imported out of", x,"lines."
return probe_association_db, constitutive_db
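### Editor's note: illustrative sketch, not part of the pipeline. The probeset
### annotation file parsed above is tab-delimited with the 17 columns written by
### exportEnsemblLinkedProbesets(); the row values below are invented.
def _example_probesetAnnotationRow():
    header = ['probeset_id','exon_id','ensembl_gene_id','transcript_cluster_id',
              'chromosome','strand','probeset_start','probeset_stop','affy_class',
              'constitutive_probeset','ens_exon_ids','ens_constitutive_status',
              'exon_region','exon-region-start(s)','exon-region-stop(s)',
              'splice_events','splice_junctions']
    row = ['2315101','E1-1','ENSG00000223972','2315100','chr1','+','11874','11937',
           'core','yes','ENSE00002234944','1','E1-1','11874','11937','','']
    assert len(header) == 17 and len(row) == len(header)
    return dict(zip(header, row))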
def reimportEnsemblProbesetsForSeqExtraction(filename,filter_type,filter_db):
fn=filepath(filename); x = 0; ensembl_exon_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1;continue
else:
try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chr, strand, probeset_start, probeset_stop, affy_class, constitutive_probeset, ens_exon_ids, exon_annotations,regionid,r_start,r_stop,splice_event,splice_junctions = string.split(data,'\t')
except ValueError: t = string.split(data,'\t'); print t;kill
ens_exon_ids = string.split(ens_exon_ids,'|')
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
ed = EnsemblImport.ExonStructureData(ensembl_gene_id, chr, strand, r_start, r_stop, constitutive_probeset, ens_exon_ids, [])
ed.setAssociatedSplicingEvent(splice_event) ###Integrate splicing annotations to look for alternative promoters
#ed.setAssociatedSplicingJunctions(splice_junctions)
probe_data = exon_id,((probeset_start,probeset_stop,probeset_id,affy_class,transcript_cluster_id),ed)
if filter_type == 'null': proceed = 'yes'
elif filter_type == 'junctions':
ed = EnsemblImport.ProbesetAnnotation(exon_id, constitutive_probeset, regionid, splice_event, splice_junctions,r_start,r_stop)
probe_data = probeset_id, strand, probeset_start, probeset_stop
probe_data = probe_data,ed
proceed = 'yes'
elif filter_type == 'only-junctions':
if '-' in exon_id and '.' in exon_id:
try: location = chr+':'+string.split(r_start,'|')[1]+'-'+string.split(r_stop,'|')[0]
except Exception: location = chr+':'+probeset_start+'-'+probeset_stop
try: pos = [int(string.split(r_start,'|')[1]),int(string.split(r_stop,'|')[0])]
except Exception:
try: pos = [int(r_start),int(r_stop)]
except Exception: pos = [int(probeset_start),int(probeset_stop)]
probe_data = probeset_id, pos,location
proceed = 'yes'
else: proceed = 'no'
elif filter_type == 'gene':
if ensembl_gene_id in filter_db: proceed = 'yes'
else: proceed = 'no'
elif filter_type == 'gene-probesets': ### get all probesets for a query gene
if ensembl_gene_id in filter_db:
proceed = 'yes'
if '-' in exon_id and '.' in exon_id: proceed = 'no' #junction
else:
exon_id = string.replace(exon_id,'-','.')
try: block,region = string.split(exon_id[1:],'.')
                        except Exception: print exon_id;sys.exit()
if '_' in region:
region = string.split(region,'_')[0]
ed = EnsemblImport.ProbesetAnnotation(exon_id, constitutive_probeset, regionid, splice_event, splice_junctions,r_start,r_stop)
probe_data = (int(block),int(region)),ed,probeset_id ### sort by exon number
else: proceed = 'no'
elif filter_type == 'probeset':
if probeset_id in filter_db: proceed = 'yes'
else: proceed = 'no'
if filter_type == 'probesets':
if ':' in probeset_id: probeset_id = string.split(probeset_id,':')[1]
if len(regionid)<1: regionid = exon_id
ensembl_exon_db[probeset_id]=string.replace(regionid,'-','.')
elif filter_type == 'junction-regions':
if '.' in regionid and '-' in regionid: ### Indicates a junction probeset
try: ensembl_exon_db[ensembl_gene_id].append((probeset_id,regionid))
except KeyError: ensembl_exon_db[ensembl_gene_id]=[(probeset_id,regionid)]
elif proceed == 'yes':
if filter_type == 'gene':
if ensembl_gene_id not in ensembl_exon_db: ensembl_exon_db[ensembl_gene_id] = [probe_data]
else:
try: ensembl_exon_db[ensembl_gene_id].append(probe_data)
except KeyError: ensembl_exon_db[ensembl_gene_id] = [probe_data]
print len(ensembl_exon_db), "critical genes parsed with annotated exon data..."
return ensembl_exon_db
def getSplicingAnalysisProbesets(probeset_db,constitutive_db,annotate_db):
splicing_analysis_db={}; count=0; count2=0; count3=0
#ensembl_annotation_db[ensembl_gene_id] = symbol,description,mRNA_processing
for probeset in probeset_db:
ensembl_gene = probeset_db[probeset][0]
if ensembl_gene in constitutive_db:
try: splicing_analysis_db[ensembl_gene].append(probeset)
except KeyError: splicing_analysis_db[ensembl_gene] = [probeset]
count += 1
else:
if ensembl_gene in annotate_db: mRNA_processing = annotate_db[ensembl_gene][2]
else: mRNA_processing = ''
if mRNA_processing == 'RNA_processing/binding':
try: splicing_analysis_db[ensembl_gene].append(probeset)
except KeyError: splicing_analysis_db[ensembl_gene] = [probeset]
count2 += 1
else:
count3 += 1
#print 'Number of probesets with constitutive probes:',count
#print 'Number of other mRNA processing probesets:',count2
#print 'Number of probesets excluded:',count3
return splicing_analysis_db
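### Editor's note: illustrative sketch, not part of the pipeline. A gene's
### probesets are retained for splicing analysis when the gene has constitutive
### probesets or, failing that, is annotated as RNA_processing/binding; the toy
### databases below are invented.
def _example_splicingAnalysisFilter():
    probeset_db = {'PS1': ('ENSG_A',), 'PS2': ('ENSG_B',), 'PS3': ('ENSG_C',)}
    constitutive_db = {'ENSG_A': ['PS1']}
    annotate_db = {'ENSG_B': ('symbol', 'description', 'RNA_processing/binding')}
    result = getSplicingAnalysisProbesets(probeset_db, constitutive_db, annotate_db)
    ### ENSG_A kept (constitutive), ENSG_B kept (RNA_processing/binding), ENSG_C dropped
    assert 'ENSG_A' in result and 'ENSG_B' in result and 'ENSG_C' not in result
    return result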
def getAnnotations(process_from_scratch,x,source_biotype,Species):
    """Annotate Affymetrix exon array data using Ensembl annotation files (sync'ed to the genome release)."""
### probeset_db[probeset] = gene,exon_number,ensembl_exon_annotation,ensembl_exon_id
#NEW probeset_db[probeset] = gene,transcluster,exon_id,ens_exon_ids,exon_annotations,constitutive
### NA constitutive_db[gene] = [probeset]
### annotate_db[gene] = definition, symbol,rna_processing
    global species; species = Species; global export_probeset_mRNA_associations; global biotypes
global test; global test_cluster; global filter_sgv_output; global arraytype
export_probeset_mRNA_associations = 'no'; biotypes = ''
if source_biotype == 'junction': arraytype = 'junction'; source_biotype = 'mRNA'
    elif source_biotype == 'gene': arraytype = 'gene'; source_biotype = 'mRNA'
elif 'RNASeq' in source_biotype: arraytype,database_root_dir = source_biotype; source_biotype = 'mRNA'
else: arraytype = 'exon'
filter_sgv_output = 'no'
test = 'no'
test_cluster = [3161519, 3161559, 3161561, 3161564, 3161566, 3161706, 3161710, 3161712, 2716656, 2475411]
test_cluster = [2887449]
partial_process = 'no'; status = 'null'
if process_from_scratch == 'yes':
if partial_process == 'no':
#"""
            global ensembl_exon_db; global exon_clusters
global exon_region_db; global intron_retention_db; global intron_clusters; global ucsc_splicing_annot_db
global constitutive_source; constitutive_source = x
probeset_transcript_file = checkDirectoryFiles()
ensembl_exon_db,ensembl_annot_db,exon_clusters,intron_clusters,exon_region_db,intron_retention_db,ucsc_splicing_annot_db,ens_transcript_db = EnsemblImport.getEnsemblAssociations(species,source_biotype,test)
ensembl_probeset_db = getProbesetAssociations(probeset_transcript_file,ensembl_exon_db,ens_transcript_db,source_biotype)
SubGeneViewerExport.reorganizeData(species) ### reads in the data from the external generated files
status = 'ran'
if (process_from_scratch == 'no') or (status == 'ran'):
if source_biotype == 'ncRNA':
probeset_db_mRNA,constitutive_db = reimportEnsemblProbesets('AltDatabase/'+species+'/'+arraytype+'/'+species+'_Ensembl_probesets.txt')
probeset_db_ncRNA,constitutive_db = reimportEnsemblProbesets('AltDatabase/'+species+'/'+arraytype+'/'+species+'_Ensembl_probesets_ncRNA.txt')
probeset_db = {}
#print len(probeset_db_mRNA), len(probeset_db_ncRNA), len(probeset_db)
for probeset in probeset_db_ncRNA:
if probeset not in probeset_db_mRNA: probeset_db[probeset] = probeset_db_ncRNA[probeset]
probeset_db_mRNA={}; probeset_db_ncRNA={}
else:
filename = 'AltDatabase/'+species+'/'+arraytype+'/'+species+'_Ensembl_probesets.txt'
if arraytype != 'RNASeq':
probeset_db,constitutive_db = reimportEnsemblProbesets(filename)
else:
exon_standard_dir = string.replace(filename,'_probesets.txt','_exons.txt')
probeset_db,constitutive_db = reimportEnsemblProbesets(exon_standard_dir)
filename = string.replace(database_root_dir+filename,'_probesets.txt','_junctions.txt')
probeset_db,constitutive_db = reimportEnsemblProbesets(filename,probe_db=probeset_db,cs_db=constitutive_db) ### These only include exons and junctions detected from the experiment
annotate_db = EnsemblImport.reimportEnsemblAnnotations(species)
splicing_analysis_db = getSplicingAnalysisProbesets(probeset_db,constitutive_db,annotate_db)
#print "Probeset database and Annotation database reimported"
#print "STATs: probeset database:",len(probeset_db),"probesets imported"
#print " annotation database:",len(annotate_db),"genes imported"
return probeset_db,annotate_db,constitutive_db,splicing_analysis_db
if __name__ == '__main__':
y = 'Ensembl'
z = 'custom'
m = 'Mm'
h = 'Hs'
r = 'Rn'
source_biotype = 'mRNA'
Species = h
process_from_scratch = 'yes'
export_probeset_mRNA_associations = 'no'
constitutive_source = z ###If 'Ensembl', program won't look at any evidence except for Ensembl. Thus, not ideal
array_type='exon'
#getJunctionComparisonsFromExport(Species,array_type); sys.exit()
#testAffyAnnotationDownload(Species,array_type); sys.exit()
probeset_db,annotate_db,constitutive_db,splicing_analysis_db = getAnnotations(process_from_scratch,constitutive_source,source_biotype,Species)
    sys.exit()
    """Annotate Affymetrix exon array data using Ensembl annotation files (sync'ed to the genome release)."""
### probeset_db[probeset] = gene,exon_number,ensembl_exon_annotation,ensembl_exon_id
#NEW probeset_db[probeset] = gene,transcluster,exon_id,ens_exon_ids,exon_annotations,constitutive
### NA constitutive_db[gene] = [probeset]
### annotate_db[gene] = definition, symbol,rna_processing
global species; species = Species
global test; global test_cluster
global filter_sgv_output
export_probeset_mRNA_associations = 'no'
filter_sgv_output = 'yes'
test = 'yes'
test_cluster = ['7958644']
meta_test_cluster = ["3061319","3061268"]#,"3455632","3258444","3965936","2611056","3864519","3018509","3707352","3404496","3251490"]
#test_cluster = meta_test_cluster
partial_process = 'no'; status = 'null'
if process_from_scratch == 'yes':
if partial_process == 'no':
#"""
            global ensembl_exon_db; global exon_clusters
global exon_region_db; global intron_retention_db; global ucsc_splicing_annot_db
probeset_transcript_file = checkDirectoryFiles()
ensembl_exon_db,ensembl_annot_db,exon_clusters,intron_clusters,exon_region_db,intron_retention_db,ucsc_splicing_annot_db,ens_transcript_db = EnsemblImport.getEnsemblAssociations(species,source_biotype,test)
#trans_annotation_db = ExonArrayAffyRules.getTranscriptAnnotation(transcript_annotation_file,species,test,test_cluster) ###used to associate transcript_cluster ensembl's
#"""
ensembl_probeset_db = getProbesetAssociations(probeset_transcript_file,ensembl_exon_db,ens_transcript_db,source_biotype)
SubGeneViewerExport.reorganizeData(species) ### reads in the data from the external generated files
status = 'ran'
if (process_from_scratch == 'no') or (status == 'ran'):
probeset_db,constitutive_db = reimportEnsemblProbesets('AltDatabase/'+species+'/'+arraytype+'/'+species+'_Ensembl_probesets.txt')
annotate_db = EnsemblImport.reimportEnsemblAnnotations(species)
splicing_analysis_db = getSplicingAnalysisProbesets(probeset_db,constitutive_db,annotate_db)
print "Probeset database and Annotation database reimported"
print "STATs: probeset database:",len(probeset_db),"probesets imported"
print " annotation database:",len(annotate_db),"genes imported"
### === End of AltAnalyze-2.1.3.15/altanalyze/build_scripts/ExonArrayEnsemblRules.py ===
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv":
dir_list2.append(entry)
return dir_list2
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
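### parse_affymetrix_annotations reads an Affymetrix CSV annotation file (quoted,
### comma-separated fields), skips '#' comment lines, and locates the GO and
### Ensembl columns by keyword matching against the header row.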
def parse_affymetrix_annotations(filename):
temp_affy_db = {}
x=0
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
probeset_data,null = string.split(line,'\n') #remove endline
affy_data = string.split(probeset_data[1:-1],'","') #split the quoted, comma-separated fields
if x==0:
if probeset_data[0] == '#':
continue
x +=1
affy_headers = affy_data
else:
x +=1
probesets = affy_data[0]
temp_affy_db[probesets] = affy_data[1:]
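### The column indices below are offset by -1 because the probeset ID (column 0)
### is stripped off when each row is stored in temp_affy_db.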
for header in affy_headers:
x = 0; eg = ''; gs = ''
while x < len(affy_headers):
if 'rocess' in affy_headers[x]: gb = x - 1
if 'omponent' in affy_headers[x]: gc = x - 1
if 'olecular' in affy_headers[x]: gm = x - 1
if 'athway' in affy_headers[x]: gp = x - 1
if 'Gene Symbol' in affy_headers[x]: gs = x - 1
if 'Ensembl' in affy_headers[x]: eg = x - 1
x += 1
###Below code used if human exon array parsed
global analyze_human_exon_data
analyze_human_exon_data = 'no'
if eg == '':
x = 0
while x < len(affy_headers):
if 'mrna_assignment' in affy_headers[x]:
eg = x - 1
analyze_human_exon_data = 'yes'
x+=1
for probeset in temp_affy_db:
affy_data = temp_affy_db[probeset]
try:
go_bio = affy_data[gb]
except IndexError:
###Occurs due to a new line error
continue
go_com = affy_data[gc]
go_mol = affy_data[gm]
genmapp = affy_data[gp]
if gs == '': symbol = ''
else: symbol = affy_data[gs]
if analyze_human_exon_data == 'no':
ensembl = affy_data[eg]
else:
ensembl_data = affy_data[eg]
ensembl=''
try:
if 'gene:ENSMUSG' in ensembl_data:
ensembl_data = string.split(ensembl_data,'gene:ENSMUSG')
ensembl_data = string.split(ensembl_data[1],' ')
ensembl = 'ENSMUSG'+ ensembl_data[0]
if 'gene:ENSG' in ensembl_data:
ensembl_data = string.split(ensembl_data,'gene:ENSG')
ensembl_data = string.split(ensembl_data[1],' ')
ensembl = 'ENSG'+ ensembl_data[0]
except IndexError:
continue
goa=[]
goa = merge_go_annoations(go_bio,goa)
goa = merge_go_annoations(go_com,goa)
goa = merge_go_annoations(go_mol,goa)
goa = merge_go_annoations(genmapp,goa)
goa=unique.unique(goa); goa.sort();
goa = string.join(goa,'')
try:
ensembl = string.split(ensembl,' /// ')
except ValueError:
ensembl = [ensembl]
for ensembl_id in ensembl:
if len(goa)>10:
go_annotations[ensembl_id] = goa, symbol
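### merge_go_annoations extracts the term name from Affymetrix GO/GenMAPP fields,
### where ' /// ' separates multiple entries and ' // ' separates the fields of
### one entry. For example (hypothetical input, with analyze_human_exon_data set
### to 'no'): merge_go_annoations('0006397 // mRNA processing // inferred',[])
### would return ['mRNA processing // '].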
def merge_go_annoations(go_category,goa):
dd = ' // '
td = ' /// '
if td in go_category:
go_split = string.split(go_category,td)
for entry in go_split:
if analyze_human_exon_data == 'no':
try:
a,b,null = string.split(entry,dd )
entry = b+dd
goa.append(entry)
except ValueError:
###occurs with GenMAPP entries
a,null = string.split(entry,dd )
entry = a+dd
goa.append(entry)
else:
try:
f,a,b,null = string.split(entry,dd )
entry = b+dd
goa.append(entry)
except ValueError:
###occurs with GenMAPP entries
f,null,a = string.split(entry,dd )
entry = a+dd
goa.append(entry)
else:
if go_category != '---':
if dd in go_category:
if analyze_human_exon_data == 'no':
try:
a,null = string.split(go_category,dd)
entry = a+dd
except ValueError:
a,b,null = string.split(go_category,dd )
entry = b+dd
goa.append(entry)
else:
try:
f,null,a = string.split(go_category,dd)
entry = a+dd
except ValueError:
f,a,b,null = string.split(go_category,dd )
entry = b+dd
goa.append(entry)
else:
goa.append(go_category)
return goa
def getDirectoryFiles(dir,species):
dir_list = read_directory('/AltDatabase/'+species+'/exon') #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = dir[1:]+'/'+data
if 'MoEx-1_0-st-transcript-annot.csv' in affy_data_dir and species == 'Mm':
transcript_annotation_file = affy_data_dir
elif 'HuEx-1_0-st-transcript-annot.csv' in affy_data_dir and species == 'Hs':
transcript_annotation_file = affy_data_dir
elif 'annot.hg' in affy_data_dir:
probeset_transcript_file = affy_data_dir
return transcript_annotation_file
def parseAffyGO(use_exon_data,get_splicing_factors,species):
print "Parsing out Affymetrix GO annotations"
global go_annotations; go_annotations={}
import_dir = '/AltDatabase/affymetrix/'+species+'/'
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for affy_data in dir_list: #loop through each file in the directory to output results
affy_data_dir = import_dir[1:]+affy_data
if use_exon_data == 'yes':
affy_data_dir = getDirectoryFiles('/AltDatabase/'+species+'/exon',species)
try: parse_affymetrix_annotations(affy_data_dir)
except Exception: null=[]
print len(go_annotations),"Ensembl gene annotations parsed"
if get_splicing_factors == 'yes':
go = go_annotations; mRNA_processing_ensembl=[]
for entry in go_annotations:
if 'splicing' in go[entry][0] or ('mRNA' in go[entry][0] and 'processing' in go[entry][0]):
mRNA_processing_ensembl.append(entry)
print len(mRNA_processing_ensembl),"mRNA processing/splicing regulators identified"
return mRNA_processing_ensembl
else:
return go_annotations
if __name__ == '__main__':
go = parseAffyGO('yes','no','Hs')
### === End of AltAnalyze-2.1.3.15/altanalyze/build_scripts/GO_parsing.py ===
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
_script = 'AltAnalyzeViewer.py'
_appName = "AltAnalyzeViewer"
_appVersion = '1.0.1'
_appDescription = "AltAnalyze is a freely available, open-source and cross-platform program that allows you to take RNASeq or "
_appDescription +="relatively raw microarray data (CEL files or normalized), identify predicted alternative splicing or alternative "
_appDescription +="promoter changes and view how these changes may affect protein sequence, domain composition, and microRNA targeting."
_authorName = 'Nathan Salomonis'
_authorEmail = '[email protected]'
_authorURL = 'http://www.altanalyze.org'
_appIcon = "Viewer.ico"
excludes = ["igraph","patsy","pandas","suds","lxml","cairo","cairo2","ImageTk","PIL","Pillow","mpmath",'pysam','Bio']
excludes = ["igraph","patsy","pandas","suds","lxml","cairo","cairo2","mpmath",'virtualenv','Tkinter','matplotlib.tests',"Pillow"]
#excludes = []
includes = ["wx"] #["suds", "mpmath", "numpy"]
includes = ["mpmath", "numpy",'pysam.TabProxies','pysam.ctabixproxies','dbhash','anydbm']
""" By default, suds will be installed in site-packages as a .egg file (zip compressed). Make a duplicate, change to .zip and extract
here to allow it to be recognized by py2exe (must be a directory) """
matplot_exclude = [] #['MSVCP90.dll']
scipy_exclude = [] #['libiomp5md.dll','libifcoremd.dll','libmmd.dll']
""" xml.sax.drivers2.drv_pyexpat is an XML parser needed by suds that py2app fails to include. Identified by looking at the line: parser_list+self.parsers in
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/PyXML-0.8.4-py2.7-macosx-10.6-intel.egg/_xmlplus/sax/saxexts.py
check the py2app print out to see where this file is in the future
(reported issue - may or may not apply) For mac and igraph, core.so must be copied to a new location for py2app:
sudo mkdir /System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/lib-dynload/igraph/
cp /Library/Python/2.6/site-packages/igraph/core.so /System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/lib-dynload/igraph/
"""
if sys.platform.startswith("darwin"):
### Local version: /usr/local/bin/python2.6
### example command: python setup.py py2app
from distutils.core import setup
import py2app
#import lxml
includes+= ["pkg_resources","distutils","lxml.etree","lxml._elementpath",'pysam.TabProxies','pysam.ctabixproxies'] #"xml.sax.drivers2.drv_pyexpat"
"""
resources = ['/System/Library/Frameworks/Python.framework/Versions/2.6/include/python2.6/pyconfig.h']
frameworks = ['/System/Library/Frameworks/Python.framework/Versions/2.6/include/python2.6/pyconfig.h']
frameworks += ['/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/pkg_resources.py']
frameworks += ['/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/distutils/util.py']
frameworks += ['/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/distutils/sysconfig.py']
import pkg_resources
import distutils
import distutils.sysconfig
import distutils.util
"""
options = {"py2app":
{"excludes": excludes,
"includes": includes,
#"frameworks": frameworks,
#"resources": resources,
"argv_emulation": True,
'arch': 'i386', ## for wx
"iconfile": "Viewer.icns"}
}
setup(name=_appName,
app=[_script],
version=_appVersion,
description=_appDescription,
author=_authorName,
author_email=_authorEmail,
url=_authorURL,
options=options,
#data_files=data_files,
setup_requires=["py2app"]
)
import unique, shutil
root_path = unique.filepath('')
software_path = root_path+'/dist/AltAnalyzeViewer.app/Contents/Frameworks/Tcl.framework'
shutil.rmtree(software_path)
software_path = root_path+'/dist/AltAnalyzeViewer.app/Contents/Frameworks/Tk.framework'
shutil.rmtree(software_path)
software_path = root_path+'/dist/AltAnalyzeViewer.app/Contents/Resources/mpl-data/sample_data'
shutil.rmtree(software_path)
software_path = root_path+'/dist/AltAnalyzeViewer.app/Contents/Resources/lib/python2.7/matplotlib/tests'
shutil.rmtree(software_path)
if sys.platform.startswith("win"):
### example command: python setup.py py2exe
from distutils.core import setup
import py2exe
import suds
import numpy
import matplotlib
import unique
import lxml
import sys
import six ### relates to a date-time dependency in matplotlib
import pysam
import TabProxies
import ctabix
import csamtools
import cvcf
import dbhash
import anydbm
#sys.path.append(unique.filepath("Config\DLLs")) ### This is added, but DLLs still require addition to DLL python dir
from distutils.filelist import findall
import os
data_files=matplotlib.get_py2exe_datafiles()
matplotlibdatadir = matplotlib.get_data_path()
matplotlibdata = findall(matplotlibdatadir)
matplotlibdata_files = []
for f in matplotlibdata:
dirname = os.path.join('matplotlibdata', f[len(matplotlibdatadir)+1:])
matplotlibdata_files.append((os.path.split(dirname)[0], [f]))
windows=[{"script":_script,"icon_resources":[(1,_appIcon)]}]
options={'py2exe':
{
### a Python dict literal cannot hold duplicate keys (only the last would
### survive), so the required modules are combined into one 'includes' list
"includes": ['suds','lxml','lxml.etree','lxml._elementpath','matplotlib','mpl_toolkits','matplotlib.backends.backend_tkagg'],
"dll_excludes": matplot_exclude+scipy_exclude,
}}
setup(
windows = windows,
options = options,
version=_appVersion,
description=_appDescription,
author=_authorName,
author_email=_authorEmail,
url=_authorURL,
data_files=matplotlibdata_files+data_files,
)
if sys.platform.startswith("2linux"):
# bb_setup.py
from bbfreeze import Freezer
f = Freezer(distdir="bb-binary")
f.addScript("RemoteViewer.py")
f()
if sys.platform.startswith("linux"):
### example command: python setup.py build
includes = ['matplotlib','mpl_toolkits','matplotlib.backends.backend_tkagg']
includefiles = []
from cx_Freeze import setup, Executable
### use to get rid of library.zip and move into the executable, along with appendScriptToLibrary and appendScriptToExe
#buildOptions = dict(create_shared_zip = False)
setup(
name = _appName,
version=_appVersion,
description=_appDescription,
author=_authorName,
author_email=_authorEmail,
url=_authorURL,
#options = dict(build_exe = buildOptions),
options = {"build_exe": {"includes":includes, "include_files": includefiles}},
executables = [Executable(_script,
#appendScriptToExe=True,
#appendScriptToLibrary=False,
#icon='goelite.ico',
compress=True)],
)
### === End of AltAnalyze-2.1.3.15/altanalyze/build_scripts/setupRemoteViewer.py ===
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
try: from build_scripts import ExonArrayEnsemblRules
except Exception: pass
try: from build_scripts import EnsemblImport
except Exception: pass
import shutil
try: from build_scripts import JunctionArray
except Exception: pass
import update
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def eliminateRedundant(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
############# Affymetrix NextGen Junction Array Code
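### The species_alternative_junctions.txt file parsed below is tab-delimited with
### columns: gene, critical_exon, junction1, junction2, splice_event; the junction
### containing the critical exon is treated as the inclusion junction.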
def importEnsemblUCSCAltJunctions(species,type):
if type == 'standard':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_alternative_junctions.txt'
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_alternative_junctions'+type+'.txt'
fn=filepath(filename); x = 0; gene_junction_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1
else:
gene,critical_exon,junction1,junction2,splice_event = string.split(data,'\t')
if critical_exon in junction1: incl_junction = junction1; excl_junction = junction2
else: incl_junction = junction2; excl_junction = junction1
gene_junction_db[gene,critical_exon,incl_junction,excl_junction]=[]
#try: gene_junction_db[gene,incl_junction,excl_junction].append(critical_exon)
#except KeyError: gene_junction_db[gene,incl_junction,excl_junction] = [critical_exon]
print len(gene_junction_db), 'alternative junction-pairs identified from Ensembl and UCSC'
return gene_junction_db
def getJunctionComparisonsFromExport(species,array_type):
type = 'standard'
gene_junction_db = importEnsemblUCSCAltJunctions(species,type)
### Retrieve probesets with exon-junctions associated - these are critical exons
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
gene_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(filename,'junctions',{})
left={}; right={}; gene_db={}; gene_exon_db={}; nonjunction_aligning={}
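### Junction probesets are split into 5' and 3' halves, flagged by a '|5' or '|3'
### suffix; probeset[:-2] strips that suffix to recover the shared junction ID.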
for gene in gene_probeset_db:
for (probe_data,ed) in gene_probeset_db[gene]:
probeset, strand, probeset_start, probeset_stop = probe_data
region_id = string.replace(ed.RegionNumber(),'-','.')
original_region_id = region_id
region_ids = string.split(region_id,'|')
gene_db[probeset[:-2]]=gene
#ed.AssociatedSplicingJunctions()
r_starts=string.split(ed.ExonStart(),'|'); r_stops=string.split(ed.ExonStop(),'|')
for region_id in region_ids:
if '|5' in probeset:
try: left[probeset[:-2]].append(region_id)
except Exception: left[probeset[:-2]]=[region_id]
if strand == '+': ### If the junction probesets DO NOT align to the region coordinates, then the probeset maps to a junction outside the database
if probeset_stop not in r_stops: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_stop,'left'
elif probeset_start not in r_starts: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_start,'left'
elif '|3' in probeset:
try: right[probeset[:-2]].append(region_id)
except Exception: right[probeset[:-2]]=[region_id]
if strand == '+':
if probeset_start not in r_starts: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_start,'right'
elif probeset_stop not in r_stops: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_stop,'right'
else:
if '_' in region_id: print killer ### undefined name: intentionally raises a NameError to halt on unexpected region IDs
try: gene_exon_db[gene,region_id].append(probeset)
except Exception: gene_exon_db[gene,region_id] = [probeset]
print 'len(nonjunction_aligning)',len(nonjunction_aligning)
gene_exon_db = eliminateRedundant(gene_exon_db)
junction_db={} ### Get the exon-region IDs for an exon-junction
for probeset in left:
gene = gene_db[probeset]
if probeset in right:
for region1 in left[probeset]:
for region2 in right[probeset]:
junction = region1+'-'+region2
try: junction_db[gene,junction].append(probeset)
except Exception: junction_db[gene,junction] = [probeset]
probeset_junction_export = 'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps.txt'
fn=filepath(probeset_junction_export); data = open(fn,'w')
print "Exporting",probeset_junction_export
title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
data.write(title); temp_list=[]
for (gene,critical_exon,incl_junction,excl_junction) in gene_junction_db:
if (gene,incl_junction) in junction_db:
incl_junction_probesets = junction_db[gene,incl_junction]
if (gene,excl_junction) in junction_db:
excl_junction_probesets = junction_db[gene,excl_junction]
for incl_junction_probeset in incl_junction_probesets:
for excl_junction_probeset in excl_junction_probesets:
try:
for incl_exon_probeset in gene_exon_db[gene,critical_exon]:
if incl_junction_probeset in nonjunction_aligning or excl_junction_probeset in nonjunction_aligning: null=[]
else: ### Ensure the probeset DOES map to the annotated junctions
temp_list.append(string.join([gene,critical_exon,excl_junction,critical_exon,excl_junction_probeset,incl_exon_probeset,'AltAnalyze'],'\t')+'\n')
except Exception: null=[]
if incl_junction_probeset in nonjunction_aligning:
new_region_id, side = nonjunction_aligning[incl_junction_probeset]
incl_junction = renameJunction(incl_junction,side,new_region_id)
if excl_junction_probeset in nonjunction_aligning:
new_region_id, side = nonjunction_aligning[excl_junction_probeset]
excl_junction = renameJunction(excl_junction,side,new_region_id)
if excl_junction_probeset!=incl_junction_probeset:
temp_list.append(string.join([gene,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,'AltAnalyze'],'\t')+'\n')
temp_list = unique.unique(temp_list)
for i in temp_list: data.write(i)
data.close()
print 'Number of compared junctions exported', len(temp_list)
def renameJunction(junction_id,side,new_region_id):
try: l,r = string.split(junction_id,'-')
except Exception: print junction_id;kill
if side == 'left': junction_id = new_region_id+'-'+r
else: junction_id = l+'-'+new_region_id
return junction_id
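### JunctionInformation bundles one reciprocal junction comparison: the gene,
### its critical exon(s), and the exclusion versus inclusion junctions and
### probesets, with formatting helpers for RNASeq trans-splicing IDs.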
class JunctionInformation:
def __init__(self,gene,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset,source):
self._gene = gene; self._critical_exon = critical_exon; self.excl_junction = excl_junction; self.incl_junction = incl_junction
self.excl_probeset = excl_probeset; self.incl_probeset = incl_probeset; self.source = source
self.critical_exon_sets = [critical_exon]
def GeneID(self): return str(self._gene)
def CriticalExon(self):
ce = str(self._critical_exon)
if '-' in ce: ce = string.replace(ce,'-','.')
return ce
def CriticalExonList(self):
critical_exon_str = self.CriticalExon()
critical_exons = string.split(critical_exon_str,'|')
return critical_exons
def ParentCriticalExon(self):
ce = str(self.CriticalExon())
if '_' in ce:
ces = string.split(ce,'|')
ces2=[]
for ce in ces:
if '_' in ce:
ce = string.split(ce,'_')[0]
ces2.append(ce)
ce = string.join(ces2,'|')
return ce
def setCriticalExons(self,critical_exons): self._critical_exon = critical_exons
def setCriticalExonSets(self,critical_exon_sets): self.critical_exon_sets = critical_exon_sets
def CriticalExonSets(self): return self.critical_exon_sets ### list of critical exons (can select any or all for functional analysis)
def setInclusionJunction(self,incl_junction): self.incl_junction = incl_junction
def InclusionJunction(self): return self.FormatJunction(self.incl_junction,self.InclusionProbeset())
def ExclusionJunction(self): return self.FormatJunction(self.excl_junction,self.ExclusionProbeset())
def FormatJunction(self,junction,probeset):
### Used for RNASeq when trans-splicing occurs
probeset_split = string.split(probeset,':') ### Indicates trans-splicing - two gene IDs listed
if len(probeset_split)>2:
junction = probeset
elif 'E0.1' in junction:
junction = 'U'+junction[1:]
return str(junction)
def setInclusionProbeset(self,incl_probeset): self.incl_probeset = incl_probeset
def InclusionProbeset(self): return self.FormatProbeset(self.incl_probeset)
def ExclusionProbeset(self): return self.FormatProbeset(self.excl_probeset)
def FormatProbeset(self,probeset):
if ':' in probeset:
probeset = string.split(probeset,':')[1]
elif '@' in probeset:
probeset = reformatID(probeset)
return str(probeset)
def DataSource(self): return str(self.source)
def OutputLine(self):
value = string.join([self.GeneID(),self.CriticalExon(),self.ExclusionJunction(),self.InclusionJunction(),self.ExclusionProbeset(),self.InclusionProbeset(),self.DataSource()],'\t')+'\n'
return value
def __repr__(self): return self.GeneID()
def formatID(id):
### JunctionArray methods handle IDs with ":" differently from those that lack it
return string.replace(id,':','@')
def reformatID(id):
### JunctionArray methods handle IDs with ":" differently from those that lack it
return string.replace(id,'@',':')
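### reimportJunctionComps accepts flexible arguments: species may be a plain
### string or a (species, root_dir) tuple (required for RNASeq), and file_type
### may be a string or a (file_type, filter_db) tuple to restrict the import to
### specific genes.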
def reimportJunctionComps(species,array_type,file_type):
if len(species[0])>1:
species, root_dir = species
else: species = species
if len(file_type) == 2:
file_type, filter_db = file_type; filter='yes'
else: filter_db={}; filter='no'
filename = 'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps.txt'
if file_type == 'updated':
filename = string.replace(filename,'.txt','_updated.txt')
if array_type == 'RNASeq': filename = root_dir+filename
fn=filepath(filename); junction_inclusion_db={}; x=0
for line in open(fn,'rU').xreadlines():
if x==0: x=1
else:
data = cleanUpLine(line)
try: gene,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source = string.split(data,'\t')
except Exception: print data;kill
proceed = 'yes'
if filter == 'yes':
if gene not in filter_db: proceed = 'no'
if proceed == 'yes':
if array_type == 'RNASeq':
### Reformat IDs, as junction arrays interpret ':' as a separator to remove the gene ID
excl_junction_probeset = formatID(excl_junction_probeset)
incl_junction_probeset = formatID(incl_junction_probeset)
if file_type == 'updated':
### Add exclusion-junction versus inclusion-exon
critical_exons = string.split(critical_exon,'|')
for ce in critical_exons:
critical_exon_probeset = formatID(gene+':'+ce)
ji=JunctionInformation(gene,critical_exon,excl_junction,ce,excl_junction_probeset,critical_exon_probeset,source)
junction_inclusion_db[excl_junction_probeset,critical_exon_probeset] = [ji]
ji=JunctionInformation(gene,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
if ':' in excl_junction_probeset:
tc,excl_junction_probeset=string.split(excl_junction_probeset,':')
tc,incl_junction_probeset=string.split(incl_junction_probeset,':')
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
if array_type != 'RNASeq':
print len(junction_inclusion_db),'reciprocal junctions imported'
return junction_inclusion_db
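### importAndReformatEnsemblJunctionAnnotations merges the 5' and 3' halves of
### each junction probeset into a single record, re-derives the junction's
### exon-region ID and coordinates, and writes the combined annotations to
### species_Ensembl_probesets.txt.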
def importAndReformatEnsemblJunctionAnnotations(species,array_type,nonconstitutive_junctions):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
export_filepath = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
efn=filepath(export_filepath); export_data = open(efn,'w')
fn=filepath(filename); x = 0; ensembl_exon_db={}; left={}; right={}; exon_gene_db={}; nonjunction_aligning={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1; export_data.write(data+'\n')
else:
t = string.split(data,'\t')
probeset, exon_id, ensembl_gene_id, transcript_cluster_id, chr, strand, probeset_start, probeset_stop, affy_class, constitutitive_probeset, ens_exon_ids, exon_annotations,regionid,r_start,r_stop,splice_event,splice_junctions = t
if len(regionid)<1: regionid = exon_id; t[12] = exon_id
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
tc,probeset=string.split(probeset,':'); regionid = string.replace(regionid,'-','.'); original_region_id = regionid
r_starts=string.split(r_start,'|'); r_stops=string.split(r_stop,'|')
ed = EnsemblImport.ExonStructureData(ensembl_gene_id, chr, strand, probeset_start, probeset_stop, constitutitive_probeset, ens_exon_ids, []); ed.reSetExonID(regionid)
if '|5' in probeset:
left[probeset[:-2]] = ed,t
if strand == '+': ### If the junction probesets DO NOT align to the region coordinates, then the probeset maps to a junction outside the database
if probeset_stop not in r_stops: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_stop,'left'
elif probeset_start not in r_starts: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_start,'left'
elif '|3' in probeset:
right[probeset[:-2]] = ed,t
if strand == '+':
if probeset_start not in r_starts: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_start,'right'
elif probeset_stop not in r_stops: nonjunction_aligning[probeset[:-2]] = original_region_id+'_'+probeset_stop,'right'
else:
t[0] = probeset
ensembl_exon_db[probeset] = ed
export_data.write(string.join(t,'\t')+'\n')
regionids = string.split(regionid,'|')
for regionid in regionids: exon_gene_db[ensembl_gene_id,regionid] = probeset
for probeset in left:
if probeset in right:
l,pl = left[probeset]; r,pr = right[probeset]
if l.Constitutive() != r.Constitutive(): l.setConstitutive('no') ### used to determine if a junction is alternative or constitutive
if probeset in nonconstitutive_junctions: l.setConstitutive('no')
l.setJunctionCoordinates(l.ExonStart(),l.ExonStop(),r.ExonStart(),r.ExonStop())
ens_exon_idsl = pl[10]; ens_exon_idsr = pr[10]; exon_idl = pl[1]; exon_idr = pr[1]
regionidl = pl[12]; regionidr = pr[12]; splice_junctionsl = pl[-1]; splice_junctionsr = pr[-1]
exon_idl = string.replace(exon_idl,'-','.'); exon_idr = string.replace(exon_idr,'-','.')
regionidl_block = string.split(regionidl,'-')[0]; regionidr_block = string.split(regionidr,'-')[0]
if regionidl_block != regionidr_block: ### Otherwise, the junction is probing a single exon block and thus is not informative
regionidl = string.replace(regionidl,'-','.'); regionidr = string.replace(regionidr,'-','.')
exon_id = exon_idl+'-'+exon_idr; regionid = regionidl+'-'+regionidr
if probeset in nonjunction_aligning:
new_region_id, side = nonjunction_aligning[probeset]
regionid = renameJunction(regionid,side,new_region_id)
l.reSetExonID(regionid); ensembl_exon_db[probeset] = l
splice_junctionsl+=splice_junctionsr
ens_exon_idsl = string.split(ens_exon_idsl,'|'); ens_exon_idsr = string.split(ens_exon_idsr,'|')
ens_exon_ids=string.join(unique.unique(ens_exon_idsl+ens_exon_idsr),'|')
pl[10] = ens_exon_ids; pl[12] = regionid; pl[1] = exon_id; pl[-1] = splice_junctionsl
pl[13] = l.ExonStart()+'|'+l.ExonStop(); pl[14] = r.ExonStart()+'|'+r.ExonStop()
strand = pl[5]
if strand == '+':
pl[6] = l.ExonStop(); pl[7] = r.ExonStart() ### junction splice sites
else:
pl[6] = l.ExonStart(); pl[7] = r.ExonStop() ### junction splice sites
pl[0] = probeset; pl[9] = l.Constitutive()
pl = string.join(pl,'\t')+'\n'
export_data.write(pl)
export_data.close()
return ensembl_exon_db,exon_gene_db
################## AltMouse and Generic Junction Array Analysis
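### The functions below map junction-array gene IDs to Ensembl genes via shared
### gene symbols, then attach pre-computed critical-exon genomic locations so
### junction probesets can be annotated with the same pipeline as exon arrays.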
def importCriticalExonLocations(species,array_type,ensembl_exon_db,force):
###ensembl_exon_db[(geneid,chr,strand)] = [[E5,exon_info]] #exon_info = (exon_start,exon_stop,exon_id,exon_annot)
###ensembl_probeset_db[geneid,chr,strand].append(probeset_data) #probeset_data = [start,stop,probeset_id,exon_class,transcript_cluster_id]
gene_info_db = {}
for (ens_geneid,chr,strand) in ensembl_exon_db: gene_info_db[ens_geneid] = chr,strand
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations.txt'
array_ensembl={}
###Get the most recent gene-symbol annotations (applicable with a new Ensembl build for the same genomic build)
ensembl_symbol_db = getEnsemblAnnotations(species)
primary_gene_annotation_file = 'AltDatabase/'+species +'/'+ array_type +'/'+ array_type+ '_gene_annotations.txt'
update.verifyFile(primary_gene_annotation_file,array_type)
array_gene_annotations = JunctionArray.importGeneric(primary_gene_annotation_file)
for array_geneid in array_gene_annotations:
t = array_gene_annotations[array_geneid]; description=t[0];entrez=t[1];symbol=t[2]
if symbol in ensembl_symbol_db and len(symbol)>0 and len(array_geneid)>0:
ens_geneid = ensembl_symbol_db[symbol]
if len(ens_geneid)>0: array_ensembl[array_geneid]= ens_geneid
update.verifyFile(filename,array_type)
ensembl_probeset_db = importJunctionLocationData(filename,array_ensembl,gene_info_db,test)
print len(ensembl_probeset_db), "Genes included in",array_type,"location database"
return ensembl_probeset_db
def importJunctionLocationData(filename,array_ensembl,gene_info_db,test):
fn=filepath(filename); key_db = {}; x = 0; y = 0; ensembl_probeset_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
k=0
array_geneid,exonid,ens_geneid,start,stop,gene_start,gene_stop,exon_seq = string.split(data,'\t')
probeset_id = array_geneid+':'+exonid
if test == 'yes':
if array_geneid in test_cluster: k=1
else: k = 1
if k==1:
if array_geneid in array_ensembl: ens_geneid = array_ensembl[array_geneid]; y+=1 ### Override any outdated associations
if ens_geneid in gene_info_db:
chr,strand = gene_info_db[ens_geneid]
probeset_data = [start,stop,probeset_id,'core',array_geneid]
try: ensembl_probeset_db[ens_geneid,'chr'+chr,strand].append(probeset_data)
except KeyError: ensembl_probeset_db[ens_geneid,'chr'+chr,strand] = [probeset_data]
print y,"ArrayID to Ensembl genes re-annotated..."
return ensembl_probeset_db
def getEnsemblAnnotations(species):
filename = 'AltDatabase/ensembl/'+ species + '/'+species+ '_Ensembl-annotations_simple.txt'
ensembl_annotation_db = {}
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
ensembl_gene_id,description,symbol = string.split(data,'\t')
ensembl_annotation_db[symbol] = ensembl_gene_id
return ensembl_annotation_db
def getAnnotations(Species,array_type,reannotate_exon_seq,force):
"""Annotate Affymetrix exon array data using files Ensembl data (sync'ed to genome release)."""
global species; species = Species; global test; global test_cluster
test = 'no'; test_cluster = ['TC0701360']; data_type = 'mRNA'
global ensembl_exon_db; global exon_clusters; global exon_region_db
ensembl_exon_db,ensembl_annot_db,exon_clusters,intron_clusters,exon_region_db,intron_retention_db,ucsc_splicing_annot_db,ens_transcript_db = EnsemblImport.getEnsemblAssociations(species,data_type,test)
ensembl_probeset_db = importCriticalExonLocations(species,array_type,ensembl_exon_db,force) ###Get Pre-computed genomic locations for critical exons
ensembl_probeset_db = ExonArrayEnsemblRules.annotateExons(ensembl_probeset_db,exon_clusters,ensembl_exon_db,exon_region_db,intron_retention_db,intron_clusters,ucsc_splicing_annot_db); constitutive_gene_db={}
ExonArrayEnsemblRules.exportEnsemblLinkedProbesets(array_type,ensembl_probeset_db,species)
print "\nCritical exon data exported coordinates, exon associations and splicing annotations exported..."
### Change filenames to reflect junction array type
export_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'; ef=filepath(export_filename)
export_replacement = string.replace(export_filename,'_probe','_'+array_type+'_probe')
er=filepath(export_replacement); shutil.copyfile(ef,er); os.remove(ef) ### Rename the file (copy to the new name, then delete the original)
### Export the full exon sequence for probesets/critical exons to replace the original incomplete sequence (used for miRNA analyses)
if reannotate_exon_seq == 'yes':
JunctionArray.reAnnotateCriticalExonSequences(species,array_type)
def annotateJunctionIDsAsExon(species,array_type):
from build_scripts import ExonSeqModule
probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junction_probesets-filtered.txt'
if array_type == 'RNASeq':
probeset_annotations_file = string.replace(probeset_annotations_file,'junction_probesets-filtered','exons')
junction_exon_db = ExonSeqModule.importSplicingAnnotationDatabase(probeset_annotations_file,array_type)
probeset_annotations_file = 'AltDatabase/'+species+'/exon/'+species+'_Ensembl_probesets.txt'
exon_db = ExonSeqModule.importSplicingAnnotationDatabase(probeset_annotations_file,array_type)
### Extract unique exon regions from Exon Array annotations
multiple_exon_regions={}; unique_exon_regions={}
for probeset in exon_db:
y = exon_db[probeset]
geneid = y.GeneID()
if '|' in y.ExonRegionID():
exonids = string.split(y.ExonRegionID(),'|')
for exonid in exonids: multiple_exon_regions[geneid,exonid] = y
else:
unique_exon_regions[geneid,y.ExonRegionID()] = y
### Add missing exons to unique
for uid in multiple_exon_regions:
if uid not in unique_exon_regions: unique_exon_regions[uid]=multiple_exon_regions[uid]
"""
for i in unique_exon_regions:
if 'ENSMUSG00000066842' in i:
print i
stop
"""
### Extract unique exon regions from Junction Array annotation
junction_to_exonids={}
for probeset in junction_exon_db:
if 'ENSMUSG00000066842' in probeset: print probeset
y = junction_exon_db[probeset]
geneid = y.GeneID()
if '|' in y.ExonRegionID():
exonids = string.split(y.ExonRegionID(),'|')
if probeset == 'ENSMUSG00000066842|E60.1': print [[exonids]]
for exonid in exonids:
if (geneid,exonid) in unique_exon_regions:
y = unique_exon_regions[geneid,exonid]
if probeset == 'ENSMUSG00000066842:E60.1': print [y.Probeset()]
junction_to_exonids[probeset] = y.Probeset()
else:
if (geneid,string.replace(y.ExonRegionID(),'.','-')) in unique_exon_regions:
#if ':' in probeset: print [probeset,y.ExonRegionID()];kill
y = unique_exon_regions[geneid,string.replace(y.ExonRegionID(),'.','-')]
junction_to_exonids[probeset] = y.Probeset()
output_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'-exon_probesets.txt'
fn=filepath(output_file); data = open(fn,'w')
data.write(array_type+'_probeset\texon_probeset\n')
for probeset in junction_to_exonids:
exon_probeset = junction_to_exonids[probeset]
data.write(probeset+'\t'+exon_probeset+'\n')
data.close()
if __name__ == '__main__':
m = 'Mm'; h = 'Hs'
Species = m
array_type = 'RNASeq' ###In theory, could be another type of junction or combination array
annotateJunctionIDsAsExon(Species,array_type); sys.exit()
#reimportJunctionComps(Species,array_type,'original');kill
#JunctionArray.getJunctionExonLocations(Species,array_type)
"""
### Get UCSC associations (download databases if necessary)
from build_scripts import UCSCImport
mRNA_Type = 'mrna'; run_from_scratch = 'yes'; force='no'
export_all_associations = 'no' ### YES only for protein prediction analysis
UCSCImport.runUCSCEnsemblAssociations(Species,mRNA_Type,export_all_associations,run_from_scratch,force)
"""
getAnnotations(Species,array_type,'yes','no')
JunctionArray.identifyJunctionComps(Species,array_type)
#importAndReformatEnsemblJunctionAnnotations(Species,array_type)
### === End of AltAnalyze-2.1.3.15/altanalyze/build_scripts/JunctionArrayEnsemblRules.py ===
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from build_scripts import GO_parsing
import copy
import time
import update
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def customDeepCopy(db):
db2={}
for i in db:
for e in db[i]:
try: db2[i].append(e)
except KeyError: db2[i]=[e]
return db2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
################### Import exon coordinate/transcript data from Ensembl
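### Alternative splicing events are read from UCSC's knownAlt.txt and alternative
### polyadenylation sites from polyaDb.txt; both are matched to Ensembl genes by
### chromosomal overlap below.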
def importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db):
ensembl_ucsc_splicing_annotations={}
try: ensembl_ucsc_splicing_annotations = importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,{},'polyA')
except Exception: ensembl_ucsc_splicing_annotations={}
try: ensembl_ucsc_splicing_annotations = importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,ensembl_ucsc_splicing_annotations,'splicing')
except Exception: null=[]
return ensembl_ucsc_splicing_annotations
def importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,ensembl_ucsc_splicing_annotations,data_type):
ucsc_gene_coordinates={}
if data_type == 'splicing': filename = 'AltDatabase/ucsc/'+species+'/knownAlt.txt'
if data_type == 'polyA': filename = 'AltDatabase/ucsc/'+species+'/polyaDb.txt'
start_time = time.time()
fn=filepath(filename); x=0
verifyFile(filename,species) ### Makes sure file is local and if not downloads
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if data_type == 'splicing':
regionid,chr,start,stop,event_call,null,strand = string.split(data,'\t')
if data_type == 'polyA':
event_call = 'alternative_polyA'
try: regionid,chr,start,stop,null,null,strand,start,stop = string.split(data,'\t')
except Exception: chr,start,stop,annotation,null,strand = string.split(data,'\t')
start = int(start)+1; stop = int(stop); chr = string.replace(chr,'chr','') ###checked it out and all UCSC starts are -1 from the corresponding Ensembl start
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
try: ucsc_gene_coordinates[chr,start,stop,strand].append(event_call)
except KeyError: ucsc_gene_coordinates[chr,start,stop,strand] = [event_call]
print len(ucsc_gene_coordinates),'UCSC annotations imported.'
ensembl_chr_coordinate_db={}
for gene in ensembl_gene_coordinates:
a = ensembl_gene_coordinates[gene]; a.sort()
gene_start = a[0]; gene_stop = a[-1]
chr,strand = ensembl_annotations[gene]
if chr in ensembl_chr_coordinate_db:
ensembl_gene_coordinates2 = ensembl_chr_coordinate_db[chr]
ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
else:
ensembl_gene_coordinates2={}; ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
ensembl_chr_coordinate_db[chr]=ensembl_gene_coordinates2
ucsc_chr_coordinate_db={}
for geneid in ucsc_gene_coordinates:
chr,start,stop,strand = geneid
if chr in ucsc_chr_coordinate_db:
ucsc_gene_coordinates2 = ucsc_chr_coordinate_db[chr]
ucsc_gene_coordinates2[(start,stop)] = geneid,strand
else:
ucsc_gene_coordinates2={}; ucsc_gene_coordinates2[(start,stop)] = geneid,strand
ucsc_chr_coordinate_db[chr] = ucsc_gene_coordinates2
ensembl_transcript_clusters,no_match_list = getChromosomalOveralap(ucsc_chr_coordinate_db,ensembl_chr_coordinate_db)
ensembl_ucsc_splicing_event_db = {}
for clusterid in ensembl_transcript_clusters:
ens_geneids = ensembl_transcript_clusters[clusterid]
if len(ens_geneids)==1: ###Only keep clusters that associate with a single Ensembl gene
ens_geneid = ens_geneids[0]
annotations = ucsc_gene_coordinates[clusterid]
try: ensembl_ucsc_splicing_event_db[ens_geneid].append((clusterid,annotations))
except KeyError: ensembl_ucsc_splicing_event_db[ens_geneid] = [(clusterid,annotations)]
for ensembl in ensembl_ucsc_splicing_event_db:
chr,strand = ensembl_annotations[ensembl]
key = ensembl,chr,strand
###Look through each of the annotations (with coordinate info) for those that are specifically AltPromoters
###Their coordinates occur overlapping but before the exon, so we want to change the coordinates
for (clusterid,annotations) in ensembl_ucsc_splicing_event_db[ensembl]:
new_coordinates = []
if 'altPromoter' in annotations:
chr,bp1,ep1,strand = clusterid
if key in exon_annotation_db:
exon_info_ls = exon_annotation_db[key]
for exon_info in exon_info_ls:
bp2 = exon_info[0]; ep2 = exon_info[1]; add = 0 ### ep2 is the second object in the list (previously it was also the first) 4-5-08
if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
if add == 1:
new_coordinates += [bp1,bp2,ep1,ep2] ###record all coordinates and take the extreme values
new_coordinates.sort()
if len(new_coordinates)>0:
new_start = new_coordinates[0]; new_stop = new_coordinates[-1]
clusterid = chr,new_start,new_stop,strand
annotation_str = string.join(annotations,'|')
###replace with new or old information
start = clusterid[1]; stop = clusterid[2]
try: ensembl_ucsc_splicing_annotations[ensembl].append((start,stop,annotation_str))
except KeyError: ensembl_ucsc_splicing_annotations[ensembl] = [(start,stop,annotation_str)]
if data_type == 'polyA':
### Only keep entries for which there are multiple polyAs per gene
ensembl_ucsc_splicing_annotations_multiple={}
for ensembl in ensembl_ucsc_splicing_annotations:
if len(ensembl_ucsc_splicing_annotations[ensembl])>1:
ensembl_ucsc_splicing_annotations_multiple[ensembl] = ensembl_ucsc_splicing_annotations[ensembl]
ensembl_ucsc_splicing_annotations = ensembl_ucsc_splicing_annotations_multiple
print len(ensembl_ucsc_splicing_annotations),'genes with events added from UCSC annotations.'
return ensembl_ucsc_splicing_annotations
def getChromosomalOveralap(ucsc_chr_db,ensembl_chr_db):
print len(ucsc_chr_db),len(ensembl_chr_db); start_time = time.time()
"""Find transcript_clusters that have overlapping start positions with Ensembl gene start and end (based on first and last exons)"""
###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
y = 0; l =0; ensembl_transcript_clusters={}; no_match_list=[]
###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)
for chr in ucsc_chr_db:
ucsc_db = ucsc_chr_db[chr]
try:
for (bp1,ep1) in ucsc_db:
#print (bp1,ep1)
x = 0
gene_clusterid,ucsc_strand = ucsc_db[(bp1,ep1)]
try:
ensembl_db = ensembl_chr_db[chr]
for (bp2,ep2) in ensembl_db:
y += 1; ensembl,ens_strand = ensembl_db[(bp2,ep2)]
#print (bp1,ep1),(bp2,ep2);kill
if ucsc_strand == ens_strand:
###if the two gene location ranges overlap
##########FORCE UCSC mRNA TO EXIST WITHIN THE SPACE OF ENSEMBL TO PREVENT TRANSCRIPT CLUSTER EXCLUSION IN ExonArrayEnsemblRules
add = 0
if (bp1 >= bp2) and (ep2>= ep1): add = 1 ###if the annotations reside within the gene's start and stop position
#if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
#elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
if add == 1:
#if (bp1 >= bp2) and (ep2>= ep1): a = ''
#else: print gene_clusterid,ensembl,bp1,bp2,ep1,ep2;kill
x = 1
try: ensembl_transcript_clusters[gene_clusterid].append(ensembl)
except KeyError: ensembl_transcript_clusters[gene_clusterid] = [ensembl]
l += 1
except KeyError: null=[]#; print chr, 'not found'
if x == 0: no_match_list.append(gene_clusterid)
except ValueError:
for y in ucsc_db: print y;kill
end_time = time.time(); time_diff = int(end_time-start_time)
print "UCSC genes matched up to Ensembl in %d seconds" % time_diff
print "UCSC Transcript Clusters (or accession numbers) overlapping with Ensembl:",len(ensembl_transcript_clusters)
print "With NO overlapp",len(no_match_list)
return ensembl_transcript_clusters,no_match_list
def reformatPolyAdenylationCoordinates(species,force):
""" PolyA annotations are currently only available from UCSC for human, but flat file
annotations from 2003-2006 are available for multiple species. Convert these to BED format"""
version={}
version['Rn'] = '2003(rn3)'
version['Dr'] = '2003(zv4)'
version['Gg'] = '2004(galGal2)'
version['Hs'] = '2006(hg8)'
version['Mm'] = '2004(mm5)'
print 'Exporting polyADB_2 coordinates as BED for',species
### Obtain the necessary database files
url = 'http://altanalyze.org/archiveDBs/all/polyAsite.txt'
output_dir = 'AltDatabase/ucsc/'+species + '/'
if force == 'yes':
filename, status = update.download(url,output_dir,'')
else: filename = output_dir+'polyAsite.txt'
### Import the refseq to Ensembl information
import gene_associations; from import_scripts import OBO_import; from build_scripts import EnsemblImport; import export
try:
ens_unigene = gene_associations.getGeneToUid(species,'Ensembl-UniGene')
print len(ens_unigene),'Ensembl-UniGene entries imported'
external_ensembl = OBO_import.swapKeyValues(ens_unigene); use_entrez='no'
except Exception:
ens_entrez = gene_associations.getGeneToUid(species,'Ensembl-EntrezGene')
print len(ens_entrez),'Ensembl-EntrezGene entries imported'
external_ensembl = OBO_import.swapKeyValues(ens_entrez); use_entrez='yes'
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
export_bedfile = output_dir+species+'_polyADB_2_predictions.bed'
print 'exporting',export_bedfile
export_data = export.ExportFile(export_bedfile)
header = '#'+species+'\t'+'polyADB_2'+'\t'+version[species]+'\n'
export_data.write(header)
fn=filepath(filename); x=0; not_found={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
siteid,llid,chr,sitenum,position,supporting_EST,cleavage = string.split(data,'\t')
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if species in siteid:
if 'NA' not in chr: chr = 'chr'+chr
strand = '+'; geneid = siteid
pos_start = str(int(position)-1); pos_end = position
if use_entrez=='no':
external_geneid = string.join(string.split(siteid,'.')[:2],'.')
else: external_geneid=llid
if external_geneid in external_ensembl:
ens_geneid = external_ensembl[external_geneid][0]
geneid += '-'+ens_geneid
chr,strand,start,end = gene_location_db[ens_geneid]
else:
not_found[external_geneid]=[]
bed_format = string.join([chr,pos_start,pos_end,geneid,'0','-'],'\t')+'\n' ### We don't know the strand, so write out both strands
export_data.write(bed_format)
bed_format = string.join([chr,pos_start,pos_end,geneid,'0',strand],'\t')+'\n'
export_data.write(bed_format)
export_data.close()
def verifyFile(filename,species_name):
fn=filepath(filename); counts=0
try:
for line in open(fn,'rU').xreadlines():
counts+=1
if counts>10: break
except Exception:
counts=0
if species_name == 'counts': ### Used if the file cannot be downloaded from http://www.altanalyze.org
return counts
elif counts == 0:
if species_name in filename: server_folder = species_name ### Folder equals species unless it is a universal file
elif 'Mm' in filename: server_folder = 'Mm' ### For PicTar
else: server_folder = 'all'
print 'Downloading:',server_folder,filename
update.downloadCurrentVersion(filename,server_folder,'txt')
else:
return counts
if __name__ == '__main__':
species = 'Hs'; #species_full = 'Drosophila_melanogaster'
filename = 'AltDatabase/ucsc/'+species+'/polyaDb.txt'
verifyFile(filename,species) ### Makes sure file is local and if not downloads.
sys.exit()
importEnsExonStructureData(species,[],[],[]);sys.exit()
reformatPolyAdenylationCoordinates(species,'no');sys.exit()
#test = 'yes'
#test_gene = ['ENSG00000140153','ENSG00000075413']
from build_scripts import UCSCImport; import update
knownAlt_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','knownAlt.txt.gz')
polyA_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','polyaDb.txt.gz')
output_dir = 'AltDatabase/ucsc/'+species + '/'
UCSCImport.downloadFiles(knownAlt_dir,output_dir); UCSCImport.downloadFiles(polyA_dir,output_dir);sys.exit()
ensembl_ucsc_splicing_annotations = importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db)
### === End of AltAnalyze-2.1.3.15/altanalyze/build_scripts/alignToKnownAlt.py ===
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from stats_scripts import statistics
import math
from build_scripts import EnsemblImport; reload(EnsemblImport)
from build_scripts import ExonArrayEnsemblRules; reload(ExonArrayEnsemblRules)
from build_scripts import ExonArrayAffyRules
import ExpressionBuilder
import reorder_arrays
import time
import export
import traceback
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for file in dir_list:
if '.txt' in file: dir_list2.append(file)
return dir_list2
################# Begin Analysis
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>500:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys: del db_to_clear[key]
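### exportMetaProbesets writes Affymetrix .mps (meta-probeset) files: one row per
### gene with a space-delimited list of its probesets, so downstream tools can
### summarize expression at the gene level (probe_count is estimated as 4 probes
### per probeset).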
def exportMetaProbesets(array_type,species):
import AltAnalyze; reload(AltAnalyze)
import export
probeset_types = ['core','extended','full']
if array_type == 'junction': probeset_types = ['all']
for probeset_type in probeset_types:
exon_db,null = AltAnalyze.importSplicingAnnotations(array_type,species,probeset_type,'yes','')
gene_db={}; null=[]
for probeset in exon_db:
### At this point, exon_db is filtered by the probeset_type (e.g., core)
ensembl_gene_id = exon_db[probeset].GeneID()
try: gene_db[ensembl_gene_id].append(probeset)
except Exception: gene_db[ensembl_gene_id] = [probeset]
exon_db=[]; uid=0
output_dir = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'_'+probeset_type+'.mps'
#output_cv_dir = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Conversion_'+array_type+'_'+probeset_type+'.txt'
#data_conversion = export.ExportFile(output_cv_dir)
data = export.ExportFile(output_dir)
data.write('probeset_id\ttranscript_cluster_id\tprobeset_list\tprobe_count\n')
print "Exporting",len(gene_db),"to",output_dir
for ensembl_gene_id in gene_db:
probeset_strlist = string.join(gene_db[ensembl_gene_id],' '); uid+=1
line = string.join([str(uid),str(uid),probeset_strlist,str(len(gene_db[ensembl_gene_id])*4)],'\t')+'\n'
data.write(line)
#conversion_line = string.join([str(uid),ensembl_gene_id],'\t')+'\n'; data_conversion.write(conversion_line)
data.close(); #data_conversion.close()
def adjustCounts(exp_vals):
exp_vals2=[]
for i in exp_vals: exp_vals2.append(int(i)+1) ### Increment the raw counts by 1
return exp_vals2
def remoteExonProbesetData(filename,import_these_probesets,import_type,platform):
global array_type
array_type = platform
results = importExonProbesetData(filename,import_these_probesets,import_type)
return results
def importExonProbesetData(filename,import_these_probesets,import_type):
"""This is a powerfull function, that allows exon-array data import and processing on a line-by-line basis,
allowing the program to immediately write out data or summarize it without storing large amounts of data."""
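### import_type controls the behavior: 'raw' returns expression values for the
### requested probesets, 'filterDataset' streams matching lines to a temp file,
### 'reorderFilterAndExportAll' reorders and exports every row while tracking
### exon/junction biotypes, and 'arraynames' returns only the header information.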
fn=filepath(filename); start_time = time.time()
exp_dbase={}; filtered_exp_db={}; ftest_gene_db={}; filtered_gene_db={}; probeset_gene_db={}; biotypes={}
d = 0; x = 0
if 'stats.' in filename: filetype = 'dabg'
else:
filetype = 'expression'
if 'FullDatasets' in filename and import_type == 'filterDataset':
output_file = string.replace(filename,'.txt','-temp.txt')
temp_data = export.ExportFile(output_file)
###Import expression data (non-log space)
if 'counts.' in filename: counts = 'yes'
else: counts = 'no'
try:
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
if len(data)==0: null=[]
elif data[0] != '#' and x == 1: ###Grab expression values
tab_delimited_data = string.split(data,'\t')
probeset = tab_delimited_data[0]
if '=' in probeset: probeset = string.split(probeset,'=')[0]
                if '' in tab_delimited_data or ('0' in tab_delimited_data and counts == 'no'):
                    pass ### Rare GEO datasets remove values from the exon-level data (exclude the whole probeset)
elif import_type == 'raw':
try:
null = import_these_probesets[probeset]; exp_vals = tab_delimited_data[1:]; exp_dbase[probeset] = exp_vals
except KeyError: null = [] ###Don't import any probeset data
elif import_type == 'filterDataset':
try:
null = import_these_probesets[probeset]; temp_data.write(line)
except KeyError: null = [] ###Don't import any probeset data
elif import_type == 'reorderFilterAndExportAll':
if '-' in probeset: biotypes['junction'] = []
else: biotypes['exon'] = []
try:
###For filtering, don't remove re-organized entries but export filtered probesets to another file
if exp_analysis_type == 'expression': null = import_these_probesets[probeset]
exp_vals = tab_delimited_data[1:]
#if counts == 'yes': exp_vals = adjustCounts(exp_vals)
filtered_exp_db={}; filtered_exp_db[probeset] = exp_vals
reorderArraysOnly(filtered_exp_db,filetype,counts) ###order and directly write data
except KeyError: null = [] ###Don't import any probeset data
elif data[0] != '#' and x == 0: ###Grab labels
array_names = []; array_linker_db = {}; z = 0
tab_delimited_data = string.split(data,'\t')
for entry in tab_delimited_data:
if z != 0: array_names.append(entry)
z += 1
                for array in array_names: #use this to preserve the original index order of arrays
                    array = string.replace(array,'\r','') ###This occurred once... not sure why
array_linker_db[array] = d; d +=1
x += 1
### Process and export expression dataset headers
if import_type == 'reorderFilterAndExportAll':
if filetype == 'expression':
headers = tab_delimited_data[1:]; probeset_header = tab_delimited_data[0]
filtered_exp_db[probeset_header] = headers
reorderArraysHeader(filtered_exp_db)
elif import_type == 'filterDataset': temp_data.write(line)
except IOError:
#print traceback.format_exc()
print filename, 'not found.'
null=[]
end_time = time.time(); time_diff = int(end_time-start_time)
print "Exon data imported in %d seconds" % time_diff
if array_type == 'RNASeq': id_name = 'junction IDs'
else: id_name = 'array IDs'
if import_type == 'filterDataset': temp_data.close()
if import_type == 'arraynames': return array_linker_db,array_names
if import_type == 'raw':
print len(exp_dbase),id_name,"imported with expression values"
return exp_dbase
else:
return biotypes
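### Usage sketch (hypothetical file path and probeset ID): the 'raw'
### import_type returns {probeset: [expression values]} restricted to the
### filter dictionary, so memory scales with the filter set rather than the
### full dataset; 'arraynames' parses only the header row.
#exp_dbase = importExonProbesetData('ExpressionInput/exp.MyExperiment.txt',{'2315101':[]},'raw')
#array_linker_db,array_names = importExonProbesetData('ExpressionInput/exp.MyExperiment.txt',{},'arraynames')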
def exportGroupedComparisonProbesetData(filename,probeset_db,data_type,array_names,array_linker_db,perform_alt_analysis):
"""This function organizes the raw expression data into sorted groups, exports the organized data for all conditions and comparisons
and calculates which probesets have groups that meet the user defined dabg and expression thresholds."""
#comparison_filename_list=[]
    #if perform_alt_analysis != 'expression': ### User Option (removed in version 2.0 since the option prevented proper filtering)
comparison_filename_list=[]
probeset_dbase={}; exp_dbase={}; constitutive_gene_db={}; probeset_gene_db={} ### reset databases to conserve memory
global expr_group_list; global comp_group_list; global expr_group_db
if data_type == 'residuals':
expr_group_dir = string.replace(filename,'residuals.','groups.')
comp_group_dir = string.replace(filename,'residuals.','comps.')
elif data_type == 'expression':
expr_group_dir = string.replace(filename,'exp.','groups.')
comp_group_dir = string.replace(filename,'exp.','comps.')
if 'counts.' in filename:
expr_group_dir = string.replace(expr_group_dir,'counts.','groups.')
comp_group_dir = string.replace(comp_group_dir,'counts.','comps.')
data_type = 'counts'
elif data_type == 'dabg':
expr_group_dir = string.replace(filename,'stats.','groups.')
comp_group_dir = string.replace(filename,'stats.','comps.')
comp_group_list, comp_group_list2 = ExpressionBuilder.importComparisonGroups(comp_group_dir)
expr_group_list,expr_group_db = ExpressionBuilder.importArrayGroups(expr_group_dir,array_linker_db)
print "Reorganizing expression data into comparison groups for export to down-stream splicing analysis software"
###Do this only for the header data
group_count,raw_data_comp_headers = reorder_arrays.reorderArrayHeaders(array_names,expr_group_list,comp_group_list,array_linker_db)
###Export the header info and store the export write data for reorder_arrays
global comparision_export_db; comparision_export_db={}; array_type_name = 'Exon'
if array_type == 'junction': array_type_name = 'Junction'
elif array_type == 'RNASeq': array_type_name = 'RNASeq'
    if data_type != 'residuals': AltAnalyze_input_dir = root_dir+"AltExpression/pre-filtered/"+data_type+'/'
    else: AltAnalyze_input_dir = root_dir+"AltExpression/FIRMA/residuals/"+array_type+'/'+species+'/' ### These files do not need to be filtered until AltAnalyze.py
    for comparison in comp_group_list2: ###loop through the list of comparisons
        group1 = comparison[0]; group2 = comparison[1]
        group1_name = expr_group_db[group1]; group2_name = expr_group_db[group2]
        comparison_filename = species+'_'+array_type_name+'_'+ group1_name + '_vs_' + group2_name + '.txt'
        new_file = AltAnalyze_input_dir + comparison_filename; comparison_filename_list.append(comparison_filename)
        data = export.createExportFile(new_file,AltAnalyze_input_dir[:-1])
try: array_names = raw_data_comp_headers[comparison]
except KeyError: print raw_data_comp_headers;kill
title = ['UID']+array_names; title = string.join(title,'\t')+'\n'; data.write(title)
comparision_export_db[comparison] = data ###store the export file write data so we can write after organizing
#print filename, normalize_feature_exp
biotypes = importExonProbesetData(filename,probeset_db,'reorderFilterAndExportAll')
if normalize_feature_exp == 'RPKM': ### Add the gene-level RPKM data (this is in addition to the counts. file)
exp_gene_db={}
for i in probeset_db: exp_gene_db[probeset_db[i][0]]=[]
filename = string.replace(filename,'.txt','-steady-state.txt')
#print filename, normalize_feature_exp, 'here'
importExonProbesetData(filename,exp_gene_db,'reorderFilterAndExportAll')
for comparison in comparision_export_db:
data = comparision_export_db[comparison]; data.close()
print "Pairwise comparisons for AltAnalyze exported..."
try: fulldataset_export_object.close()
except Exception: null=[]
return comparison_filename_list, biotypes
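### Naming sketch (hypothetical species and group names): pairwise comparison
### files are written as Species_ArrayType_Group1_vs_Group2.txt, matching the
### comparison_filename construction above.
def _exampleComparisonFilename():
    species_code = 'Hs'; array_type_name = 'Exon'
    group1_name = 'cancer'; group2_name = 'normal'
    return species_code+'_'+array_type_name+'_'+group1_name+'_vs_'+group2_name+'.txt' ### -> 'Hs_Exon_cancer_vs_normal.txt'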
def filterExpressionData(filename,pre_filtered_db,constitutive_gene_db,probeset_db,data_type,array_names,perform_alt_analysis):
"""Probeset level data and gene level data are handled differently by this program for exon and tiling based arrays.
Probeset level data is sequentially filtered to reduce the dataset to a minimal set of expressed probesets (exp p-values) that
that align to genes with multiple lines of evidence (e.g. ensembl) and show some evidence of regulation for any probesets in a
gene (transcript_cluster). Gene level analyses are handled under the main module of the program, exactly like 3' arrays, while
probe level data is reorganized, filtered and output from this module."""
###First time we import, just grab the probesets associated
if array_names != 'null': ### Then there is only an expr. file and not a stats file
###Identify constitutive probesets to import (sometimes not all probesets on the array are imported)
possible_constitutive_probeset={}; probeset_gene_db={}
for probeset in probeset_db:
try:
probe_data = probeset_db[probeset]
gene = probe_data[0]; affy_class = probe_data[-1]; external_exonid = probe_data[-2]
if affy_class == 'core' or len(external_exonid)>2: ### These are known exon only (e.g., 'E' probesets)
proceed = 'yes'
if array_type == 'RNASeq' and 'exon' in biotypes: ### Restrict the analysis to exon RPKM or count data for constitutive calculation
if '-' in probeset: proceed = 'no'
elif array_type == 'RNASeq' and 'junction' in biotypes:
if '-' not in probeset: proceed = 'no' ### Use this option to override
if proceed == 'yes':
try: probeset_gene_db[gene].append(probeset)
except KeyError: probeset_gene_db[gene] = [probeset]
possible_constitutive_probeset[probeset] = []
except KeyError: null = []
### Only import probesets that can be used to calculate gene expression values OR link to gene annotations (which have at least one dabg p<0.05 for all samples normalized)
constitutive_exp_dbase = importExonProbesetData(filename,possible_constitutive_probeset,'raw')
generateConstitutiveExpression(constitutive_exp_dbase,constitutive_gene_db,probeset_gene_db,pre_filtered_db,array_names,filename)
if array_type != 'RNASeq':
### Repeat the analysis to export gene-level DABG reports for LineageProfiler (done by re-calling the current function for RNASeq and counts)
try:
constitutive_exp_dbase = importExonProbesetData(stats_input_dir,possible_constitutive_probeset,'raw')
generateConstitutiveExpression(constitutive_exp_dbase,constitutive_gene_db,probeset_gene_db,pre_filtered_db,array_names,stats_input_dir)
except Exception:
print 'No dabg p-value specified for analysis (must be supplied in command-line or GUI mode - e.g., stats.experiment.txt)'
            pass ### Occurs when no stats_input_dir is specified
constitutive_exp_dbase = {}; possible_constitutive_probeset={}; pre_filtered_db={}; probeset_db={}; constitutive_gene_db={} ### reset databases to conserve memory
probeset_gene_db={}
"""
print 'global vars'
returnLargeGlobalVars()
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
def reorderArraysHeader(filtered_exp_db):
### These are all just headers from the first line
for probeset in filtered_exp_db:
grouped_ordered_array_list = {}; group_list = []
for x in expr_group_list:
y = x[1]; group = x[2] ### this is the new first index
try: new_item = filtered_exp_db[probeset][y]
except Exception:
print y,group, probeset
print 'Prior counts.YourExperiment-steady-state.txt exists and has different samples... delete before proceeding\n'
bad_exit
try: grouped_ordered_array_list[group].append(new_item)
except KeyError: grouped_ordered_array_list[group] = [new_item]
for group in grouped_ordered_array_list: group_list.append(group)
group_list.sort(); combined_value_list=[]
for group in group_list:
group_name = expr_group_db[str(group)]
g_data2 = []; g_data = grouped_ordered_array_list[group]
for header in g_data: g_data2.append(group_name+':'+header)
combined_value_list+=g_data2
values = string.join([probeset]+combined_value_list,'\t')+'\n'
fulldataset_export_object.write(values)
def reorderArraysOnly(filtered_exp_db,filetype,counts):
    ###expr_group_list gives the final group order sorted, followed by the original index order as a tuple
for probeset in filtered_exp_db:
grouped_ordered_array_list = {}; group_list = []
for x in expr_group_list:
y = x[1]; group = x[2] ### this is the new first index
### for example y = 5, therefore the filtered_exp_db[probeset][5] entry is now the first
try:
try: new_item = filtered_exp_db[probeset][y]
except TypeError: print y,x,expr_group_list; kill
except IndexError: print probeset,y,x,expr_group_list,'\n',filtered_exp_db[probeset];kill
            ###Used for comparison analysis
try: grouped_ordered_array_list[group].append(new_item)
except KeyError: grouped_ordered_array_list[group] = [new_item]
### For the exon-level expression data, export the group pair data for all pairwise comparisons to different comp files
###*******Include a database with the raw values saved for permuteAltAnalyze*******
for info in comp_group_list:
group1 = int(info[0]); group2 = int(info[1]); comp = str(info[0]),str(info[1])
g1_data = grouped_ordered_array_list[group1]
g2_data = grouped_ordered_array_list[group2]
#print probeset, group1, group2, g1_data, g2_data, info;kill
data = comparision_export_db[comp]
            values = [probeset]+g2_data+g1_data; values = string.join(values,'\t')+'\n' ### group order is reversed to match the reversed comparison labels
#raw_data_comps[probeset,comp] = temp_raw
data.write(values)
### Export all values grouped from the array
for group in grouped_ordered_array_list: group_list.append(group)
group_list.sort(); combined_value_list=[]; avg_values=[]
for group in group_list:
g_data = grouped_ordered_array_list[group]
if exp_analysis_type == 'expression':
try: avg_gdata = statistics.avg(g_data); avg_values.append(avg_gdata)
except Exception:
print g_data
print avg_values
kill
combined_value_list+=g_data
if exp_data_format == 'non-log' and counts == 'no':
try: combined_value_list = logTransform(combined_value_list)
except Exception:
print probeset, combined_value_list,comp_group_list,expr_group_list
print filtered_exp_db[probeset]; kill
if filetype == 'expression':
### Export the expression values for all samples grouped (if meeting the above thresholds)
values = string.join([probeset]+combined_value_list,'\t')+'\n'
fulldataset_export_object.write(values) ### Don't need this for dabg data
if exp_analysis_type == 'expression':
avg_values.sort() ### Sort to get the lowest dabg and largest average expression
if filetype == 'dabg':
if avg_values[0]<=dabg_p_threshold: dabg_summary[probeset]=[] ### store probeset if the minimum p<user-threshold
else:
#if 'ENSMUSG00000018263:' in probeset: print probeset,[avg_values[-1],expression_threshold]
                if avg_values[-1]>=expression_threshold:
                    expression_summary[probeset]=[] ### store probeset if the maximum group average meets the expression threshold
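### Re-grouping sketch (toy data, assuming (label,original_index,group_number)
### tuples matching the x[1]/x[2] indexing used above): values are pulled from
### their original column positions and regrouped by group number.
def _exampleReorder():
    toy_group_list = [('a',2,1),('b',0,1),('c',1,2)]
    values = ['9.1','8.5','7.7']
    grouped = {}
    for x in toy_group_list:
        y = x[1]; group = x[2]
        try: grouped[group].append(values[y])
        except KeyError: grouped[group] = [values[y]]
    return grouped ### -> {1: ['7.7','9.1'], 2: ['8.5']}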
def logTransform(exp_values):
### This code was added in version 1.16 in conjunction with a switch from logstatus to
### non-log in AltAnalyze to prevent "Process AltAnalyze Filtered" associated errors
exp_values_log2=[]
for exp_val in exp_values:
exp_values_log2.append(str(math.log(float(exp_val),2))) ### changed from - log_fold = math.log((float(exp_val)+1),2) - version 2.05
return exp_values_log2
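### Sanity-check sketch: logTransform converts non-log values to log2 strings.
def _exampleLogTransform():
    return logTransform(['1','2','8']) ### -> ['0.0', '1.0', '3.0']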
def generateConstitutiveExpression(exp_dbase,constitutive_gene_db,probeset_gene_db,pre_filtered_db,array_names,filename):
"""Generate Steady-State expression values for each gene for analysis in the main module of this package"""
steady_state_db={}; k=0; l=0
remove_nonexpressed_genes = 'no' ### By default set to 'no'
###1st Pass: Identify probesets for steady-state calculation
for gene in probeset_gene_db:
if avg_all_probes_for_steady_state == 'yes': average_all_probesets[gene] = probeset_gene_db[gene] ### These are all exon aligning (not intron) probesets
else:
if gene not in constitutive_gene_db: average_all_probesets[gene] = probeset_gene_db[gene]
else:
constitutive_probeset_list = constitutive_gene_db[gene]
constitutive_filtered=[] ###Added this extra code to eliminate constitutive probesets not in exp_dbase (gene level filters are more efficient when dealing with this many probesets)
for probeset in constitutive_probeset_list:
if probeset in probeset_gene_db[gene]: constitutive_filtered.append(probeset)
if len(constitutive_filtered)>0: average_all_probesets[gene] = constitutive_filtered
else: average_all_probesets[gene] = probeset_gene_db[gene]
###2nd Pass: Remove probesets that have no detected expression (keep all if none are expressed)
if excludeLowExpressionExons:
non_expressed_genes={} ### keep track of these for internal QC
for gene in average_all_probesets:
gene_probe_list=[]; x = 0
for probeset in average_all_probesets[gene]:
if probeset in pre_filtered_db: gene_probe_list.append(probeset); x += 1
###If no constitutive and there are probes with detected expression: replace entry
if x >0: average_all_probesets[gene] = gene_probe_list
elif remove_nonexpressed_genes == 'yes': non_expressed_genes[gene]=[]
if remove_nonexpressed_genes == 'yes':
for gene in non_expressed_genes: del average_all_probesets[gene]
###3rd Pass: Make sure the probesets are present in the input set (this is not typical unless a user is loading a pre-filtered probeset expression dataset)
for gene in average_all_probesets:
v=0
for probeset in average_all_probesets[gene]:
try: null = exp_dbase[probeset]; v+=1
except KeyError: null =[] ###occurs if the expression probeset list is missing some of these probesets
if v==0: ###Therefore, no probesets were found that were previously predicted to be best constitutive
try: average_all_probesets[gene] = probeset_gene_db[gene] ###expand the average_all_probesets to include any exon linked to the gene
except KeyError: print gene, probeset, len(probeset_gene_db), len(average_all_probesets);kill
for probeset in exp_dbase:
array_count = len(exp_dbase[probeset]); break
try: null = array_count
except Exception:
print 'WARNING...CRITICAL ERROR. Make sure the correct array type is selected and that all input expression files are indeed present (array_count ERROR).'; forceError
###Calculate avg expression for each array for each probeset (using constitutive values)
gene_count_db={}
for gene in average_all_probesets:
x = 0 ###For each array, average all probeset expression values
gene_sum=0
probeset_list = average_all_probesets[gene]#; k+= len(average_all_probesets[gene])
if array_type != 'RNASeq': ### Just retain the list of probesets for RNA-seq
while x < array_count:
                exp_list=[] ### average all exp values for constitutive probesets for each array
for probeset in probeset_list:
try:
exp_val = exp_dbase[probeset][x]
exp_list.append(exp_val)
except KeyError: null =[] ###occurs if the expression probeset list is missing some of these probesets
                try:
                    avg_const_exp=statistics.avg(exp_list)
                    ### Append a single average expression value for this array index
                    try: steady_state_db[gene].append(avg_const_exp)
                    except KeyError: steady_state_db[gene] = [avg_const_exp]
                except ZeroDivisionError: null=[] ### Occurs when processing a truncated dataset (for testing usually) - no values for the gene should be included
x += 1
l = len(probeset_gene_db) - len(steady_state_db)
steady_state_export = filename[0:-4]+'-steady-state.txt'
steady_state_export = string.replace(steady_state_export,'counts.','exp.')
fn=filepath(steady_state_export); data = open(fn,'w'); title = 'Gene_ID'
if array_type == 'RNASeq':
import RNASeq
steady_state_db, pre_filtered_db = RNASeq.calculateGeneLevelStatistics(steady_state_export,species,average_all_probesets,normalize_feature_exp,array_names,UserOptions,excludeLowExp=excludeLowExpressionExons)
### This "pre_filtered_db" replaces the above since the RNASeq module performs the exon and junction-level filtering, not ExonArray (RPKM and count based)
### Use pre_filtered_db to exclude non-expressed features for multi-group alternative exon analysis
removeNonExpressedProbesets(pre_filtered_db,full_dataset_export_dir)
reload(RNASeq)
for array in array_names: title = title +'\t'+ array
data.write(title+'\n')
for gene in steady_state_db:
ss_vals = gene
for exp_val in steady_state_db[gene]:
ss_vals = ss_vals +'\t'+ str(exp_val)
data.write(ss_vals+'\n')
data.close()
exp_dbase={}; steady_state_db={}; pre_filtered_db ={}
#print k, "probesets were not found in the expression file, that could be used for the constitutive expression calculation"
#print l, "genes were also not included that did not have such expression data"
print "Steady-state data exported to",steady_state_export
def permformFtests(filtered_exp_db,group_count,probeset_db):
###Perform an f-test analysis to filter out low significance probesets
ftest_gene_db={}; filtered_gene_db={}; filtered_gene_db2={}
for probeset in filtered_exp_db:
len_p = 0; ftest_list=[]
try: gene_id = probeset_db[probeset][0]
except KeyError: continue
for len_s in group_count:
index = len_s + len_p
exp_group = filtered_exp_db[probeset][len_p:index]
ftest_list.append(exp_group); len_p = index
fstat,df1,df2 = statistics.Ftest(ftest_list)
if fstat > f_cutoff:
ftest_gene_db[gene_id] = 1
try: filtered_gene_db[gene_id].append(probeset)
except KeyError: filtered_gene_db[gene_id] = [probeset]
###Remove genes with no significant f-test probesets
#print "len(ftest_gene_db)",len(filtered_gene_db)
for gene_id in filtered_gene_db:
if gene_id in ftest_gene_db:
filtered_gene_db2[gene_id] = filtered_gene_db[gene_id]
#print "len(filtered_gene_db)",len(filtered_gene_db2)
return filtered_gene_db2
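### Group-slicing sketch (toy data): mirrors how permformFtests partitions a
### probeset's ordered values into per-group lists using cumulative offsets.
def _exampleGroupSlices():
    values = [1,2,3,10,11,12]; group_count = [3,3]
    len_p = 0; groups=[]
    for len_s in group_count:
        index = len_s + len_p
        groups.append(values[len_p:index]); len_p = index
    return groups ### -> [[1, 2, 3], [10, 11, 12]]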
################# Resolve data annotations and filters
def eliminateRedundant(database):
    db1={}
    for key in database:
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
    return db1
def filtereLists(list1,db1):
###Used for large lists where string searches are computationally intensive
list2 = []; db2=[]
for key in db1:
for entry in db1[key]:
id = entry[1][-1]; list2.append(id)
for key in list1: db2.append(key)
for entry in list2: db2.append(entry)
temp={}; combined = []
for entry in db2:
try:temp[entry] += 1
except KeyError:temp[entry] = 1
for entry in temp:
if temp[entry] > 1:combined.append(entry)
return combined
def makeGeneLevelAnnotations(probeset_db):
transcluster_db = {}; exon_db = {}; probeset_gene_db={}
#probeset_db[probeset] = gene,transcluster,exon_id,ens_exon_ids,exon_annotations,constitutitive
    ### Previously built the list of probesets for calculating steady-state
for gene in average_all_probesets:
for probeset in average_all_probesets[gene]:
gene_id,transcluster,exon_id,ens_exon_ids,affy_class = probeset_db[probeset]
try: transcluster_db[gene].append(transcluster)
except KeyError: transcluster_db[gene] = [transcluster]
ens_exon_list = string.split(ens_exon_ids,'|')
            for exon in ens_exon_list:
                try: exon_db[gene].append(exon)
                except KeyError: exon_db[gene] = [exon]
transcluster_db = eliminateRedundant(transcluster_db)
exon_db = eliminateRedundant(exon_db)
for gene in average_all_probesets:
transcript_cluster_ids = string.join(transcluster_db[gene],'|')
probeset_ids = string.join(average_all_probesets[gene],'|')
exon_ids = string.join(exon_db[gene],'|')
probeset_gene_db[gene] = transcript_cluster_ids,exon_ids,probeset_ids
return probeset_gene_db
def getAnnotations(fl,Array_type,p_threshold,e_threshold,data_source,manufacturer,constitutive_source,Species,avg_all_for_ss,filter_by_DABG,perform_alt_analysis,expression_data_format):
global species; species = Species; global average_all_probesets; average_all_probesets={}
global avg_all_probes_for_steady_state; avg_all_probes_for_steady_state = avg_all_for_ss; global filter_by_dabg; filter_by_dabg = filter_by_DABG
global dabg_p_threshold; dabg_p_threshold = float(p_threshold); global root_dir; global biotypes; global normalize_feature_exp
global expression_threshold; global exp_data_format; exp_data_format = expression_data_format; global UserOptions; UserOptions = fl
global full_dataset_export_dir; global excludeLowExpressionExons
"""
try: exon_exp_threshold = fl.ExonExpThreshold()
except Exception: exon_exp_threshold = 0
try: exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception: exon_rpkm_threshold = 0
try: gene_rpkm_threshold = fl.RPKMThreshold()
except Exception: gene_rpkm_threshold = 0
try: gene_exp_threshold = fl.GeneExpThreshold()
except Exception: gene_exp_threshold = 0
"""
### The input expression data can be log or non-log. If non-log, transform to log in FilterDABG prior to the alternative exon analysis - v.1.16
if expression_data_format == 'log':
try: expression_threshold = math.log(float(e_threshold),2)
except Exception: expression_threshold = 0 ### Applies to RNASeq datasets
else:
expression_threshold = float(e_threshold)
process_from_scratch = 'no' ###internal variables used while testing
global dabg_summary; global expression_summary; dabg_summary={};expression_summary={}
global fulldataset_export_object; global array_type; array_type = Array_type
global exp_analysis_type; exp_analysis_type = 'expression'
global stats_input_dir
expr_input_dir = fl.ExpFile(); stats_input_dir = fl.StatsFile(); root_dir = fl.RootDir()
try: normalize_feature_exp = fl.FeatureNormalization()
except Exception: normalize_feature_exp = 'NA'
try: excludeLowExpressionExons = fl.excludeLowExpressionExons()
except Exception: excludeLowExpressionExons = True
try:
useJunctionsForGeneExpression = fl.useJunctionsForGeneExpression()
if useJunctionsForGeneExpression:
            print 'Using known junctions only to estimate gene expression!!!'
except Exception: useJunctionsForGeneExpression = False
source_biotype = 'mRNA'
if array_type == 'gene': source_biotype = 'gene'
elif array_type == 'junction': source_biotype = 'junction'
###Get annotations using Affymetrix as a trusted source or via links to Ensembl
if array_type == 'AltMouse':
probeset_db,constitutive_gene_db = ExpressionBuilder.importAltMerge('full'); annotate_db={}
source_biotype = 'AltMouse'
elif manufacturer == 'Affymetrix' or array_type == 'RNASeq':
if array_type == 'RNASeq':
source_biotype = array_type, root_dir
probeset_db,annotate_db,constitutive_gene_db,splicing_analysis_db = ExonArrayEnsemblRules.getAnnotations(process_from_scratch,constitutive_source,source_biotype,species)
### Get all file locations and get array headers
#print len(splicing_analysis_db),"genes included in the splicing annotation database (constitutive only containing)"
stats_file_status = verifyFile(stats_input_dir)
array_linker_db,array_names = importExonProbesetData(expr_input_dir,{},'arraynames')
input_dir_split = string.split(expr_input_dir,'/')
full_dataset_export_dir = root_dir+'AltExpression/FullDatasets/ExonArray/'+species+'/'+string.replace(input_dir_split[-1],'exp.','')
if array_type == 'gene': full_dataset_export_dir = string.replace(full_dataset_export_dir,'ExonArray','GeneArray')
if array_type == 'junction': full_dataset_export_dir = string.replace(full_dataset_export_dir,'ExonArray','JunctionArray')
if array_type == 'AltMouse': full_dataset_export_dir = string.replace(full_dataset_export_dir,'ExonArray','AltMouse')
if array_type == 'RNASeq': full_dataset_export_dir = string.replace(full_dataset_export_dir,'ExonArray','RNASeq')
try: fulldataset_export_object = export.ExportFile(full_dataset_export_dir)
except Exception:
print 'AltAnalyze is having trouble creating the directory:\n',full_dataset_export_dir
print 'Report this issue to the AltAnalyze help desk or create this directory manually (Error Code X1).'; force_exception
### Organize arrays according to groups and export all probeset data and any pairwise comparisons
data_type = 'expression'
if array_type == 'RNASeq':
expr_input_dir = string.replace(expr_input_dir,'exp.','counts.') ### Filter based on the counts file and then replace values with the normalized as the last step
comparison_filename_list,biotypes = exportGroupedComparisonProbesetData(expr_input_dir,probeset_db,data_type,array_names,array_linker_db,perform_alt_analysis)
if useJunctionsForGeneExpression:
if 'junction' in biotypes:
if 'exon' in biotypes: del biotypes['exon']
if filter_by_dabg == 'yes' and stats_file_status == 'found':
data_type = 'dabg'
exportGroupedComparisonProbesetData(stats_input_dir,probeset_db,data_type,array_names,array_linker_db,perform_alt_analysis)
###Filter expression data based on DABG and annotation filtered probesets (will work without DABG filtering as well) - won't work for RNA-Seq (execute function later)
filtered_exon_db = removeNonExpressedProbesets(probeset_db,full_dataset_export_dir)
filterExpressionData(expr_input_dir,filtered_exon_db,constitutive_gene_db,probeset_db,'expression',array_names,perform_alt_analysis)
constitutive_gene_db={}; probeset_gene_db = makeGeneLevelAnnotations(probeset_db)
if array_type == 'RNASeq':
fulldataset_export_object = export.ExportFile(full_dataset_export_dir)
data_type = 'expression' ### Repeat with counts and then with exp. to add gene-level estimates to both
exportGroupedComparisonProbesetData(expr_input_dir,probeset_db,data_type,array_names,array_linker_db,perform_alt_analysis)
fulldataset_export_object = export.ExportFile(full_dataset_export_dir)
expr_input_dir = string.replace(expr_input_dir,'counts.','exp.')
exportGroupedComparisonProbesetData(expr_input_dir,probeset_db,data_type,array_names,array_linker_db,perform_alt_analysis)
try: clearObjectsFromMemory(average_all_probesets); clearObjectsFromMemory(expression_summary); clearObjectsFromMemory(splicing_analysis_db)
except Exception: null=[]
filtered_exon_db=[]; probeset_db={}; average_all_probesets={}; expression_summary={}; splicing_analysis_db={}
#filtered_exp_db,group_count,ranked_array_headers = filterExpressionData(expr_input_dir,filtered_exon_db,constitutive_gene_db,probeset_db)
#filtered_gene_db = permformFtests(filtered_exp_db,group_count,probeset_db)
"""
pre_filtered_db=[]
print 'global vars'
returnLargeGlobalVars()
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return probeset_gene_db, annotate_db, comparison_filename_list
def processResiduals(fl,Array_type,Species,perform_alt_analysis):
global species; species = Species; global root_dir; global fulldataset_export_object
global array_type; array_type = Array_type; global exp_analysis_type; exp_analysis_type = 'residual'
### Get all file locations and get array headers
expr_input_dir = fl.ExpFile(); root_dir = fl.RootDir()
array_linker_db,array_names = importExonProbesetData(expr_input_dir,{},'arraynames')
input_dir_split = string.split(expr_input_dir,'/')
full_dataset_export_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'+string.replace(input_dir_split[-1],'exp.','')
    expr_input_dir = string.replace(expr_input_dir,'exp.','residuals.') ### Wait to change this until the above processes are finished
fulldataset_export_object = export.ExportFile(full_dataset_export_dir)
#print full_dataset_export_dir
### Organize arrays according to groups and export all probeset data and any pairwise comparisons
    comparison_filename_list, biotypes = exportGroupedComparisonProbesetData(expr_input_dir,{},'residuals',array_names,array_linker_db,perform_alt_analysis)
def removeNonExpressedProbesets(probeset_db,full_dataset_export_dir):
combined_db={}
if array_type == 'RNASeq':
id_name = 'junction IDs'
combined_db = probeset_db
print len(combined_db), id_name,'after detection RPKM and read count filtering.'
else:
id_name = 'array IDs'
print len(expression_summary), 'expression and',len(dabg_summary),'detection p-value filtered '+id_name+' out of', len(probeset_db)
for probeset in probeset_db:
if len(dabg_summary)>0:
try:
n = expression_summary[probeset]
s = dabg_summary[probeset]
combined_db[probeset]=[]
except Exception: null=[]
else:
try:
n = expression_summary[probeset]
combined_db[probeset]=[]
except Exception: null=[]
print len(combined_db), id_name,'after detection p-value and expression filtering.'
rewriteOrganizedExpressionFile(combined_db,full_dataset_export_dir)
return combined_db
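### Filtering sketch (toy dictionaries): when DABG statistics are available a
### probeset must appear in both the expression and detection p-value
### summaries, mirroring the intersection logic above.
def _exampleCombinedFilter():
    toy_expression_summary = {'ps1':[],'ps2':[]}; toy_dabg_summary = {'ps2':[],'ps3':[]}
    combined = {}
    for probeset in toy_expression_summary:
        if probeset in toy_dabg_summary: combined[probeset]=[]
    return combined ### -> {'ps2': []}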
def rewriteOrganizedExpressionFile(combined_db,full_dataset_export_dir):
importExonProbesetData(full_dataset_export_dir,combined_db,'filterDataset')
temp_dir = string.replace(full_dataset_export_dir,'.txt','-temp.txt')
import shutil
try:
shutil.copyfile(temp_dir, full_dataset_export_dir) ### replace unfiltered file
os.remove(temp_dir)
except Exception: null=[]
def inputResultFiles(filename,file_type):
fn=filepath(filename)
gene_db={}; x = 0
###Import expression data (non-log space)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
if file_type == 'gene': geneid = t[0]; gene_db[geneid]=[]
else: probeset = t[7]; gene_db[probeset]=[]
return gene_db
def grabExonIntronPromoterSequences(species,array_type,data_type,output_types):
    ### output_types could be adjacent intron sequences, adjacent exon sequences, target exon sequence or promoter
sequence_input_dir_list=[]
if data_type == 'probeset': sequence_input_dir = '/AltResults/AlternativeOutput/'+array_type+'/sequence_input'
if data_type == 'gene': sequence_input_dir = '/ExpressionOutput/'+array_type+'/sequence_input'
dir_list = read_directory(sequence_input_dir)
for input_file in dir_list:
filedir = sequence_input_dir[1:]+'/'+input_file
filter_db = inputResultFiles(filedir,data_type)
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,data_type,filter_db)
"""for gene in ensembl_probeset_db:
if gene == 'ENSG00000139737':
for x in ensembl_probeset_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = x
print gene, ed.ExonID()
kill"""
analysis_type = 'get_sequence'
dir = 'AltDatabase/ensembl/'+species+'/'; gene_seq_filename = dir+species+'_gene-seq-2000_flank'
ensembl_probeset_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_probeset_db,species,analysis_type)
"""
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
if output_types == 'all' and data_type == 'probeset':
output_types = ['alt-promoter','promoter','exon','adjacent-exons','adjacent-introns']
else: output_types = [output_types]
for output_type in output_types:
sequence_input_dir = string.replace(sequence_input_dir,'_input','_output')
filename = sequence_input_dir[1:]+'/ExportedSequence-'+data_type+'-'+output_type+'.txt'
exportExonIntronPromoterSequences(filename, ensembl_probeset_db,data_type,output_type)
"""
if output_types == 'all' and data_type == 'probeset':
output_types = ['alt-promoter','promoter','exon','adjacent-exons','adjacent-introns']
else: output_types = [output_types]
for output_type in output_types:
sequence_input_dir2 = string.replace(sequence_input_dir,'_input','_output')
filename = sequence_input_dir2[1:]+'/'+input_file[:-4]+'-'+data_type+'-'+output_type+'.txt'
exportExonIntronPromoterSequences(filename, ensembl_probeset_db,data_type,output_type)
def exportExonIntronPromoterSequences(filename,ensembl_probeset_db,data_type,output_type):
exon_seq_db_filename = filename
fn=filepath(exon_seq_db_filename); data = open(fn,'w'); gene_data_exported={}; probe_data_exported={}
if data_type == 'gene' or output_type == 'promoter': seq_title = 'PromoterSeq'
elif output_type == 'alt-promoter': seq_title = 'PromoterSeq'
elif output_type == 'exon': seq_title = 'ExonSeq'
elif output_type == 'adjacent-exons': seq_title = 'PrevExonSeq\tNextExonSeq'
elif output_type == 'adjacent-introns': seq_title = 'PrevIntronSeq\tNextIntronSeq'
title = ['Ensembl_GeneID','ExonID','ExternalExonIDs','ProbesetID',seq_title]
title = string.join(title,'\t')+'\n'; #data.write(title)
for ens_gene in ensembl_probeset_db:
for probe_data in ensembl_probeset_db[ens_gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
ens_exon_list = ed.ExonID(); ens_exons = string.join(ens_exon_list,' ')
proceed = 'no'
try:
if data_type == 'gene' or output_type == 'promoter':
try: seq = [ed.PromoterSeq()]; proceed = 'yes'
except AttributeError: proceed = 'no'
elif output_type == 'alt-promoter':
if 'alt-N-term' in ed.AssociatedSplicingEvent() or 'altPromoter' in ed.AssociatedSplicingEvent():
try: seq = [ed.PrevIntronSeq()]; proceed = 'yes'
except AttributeError: proceed = 'no'
elif output_type == 'exon':
try: seq = [ed.ExonSeq()]; proceed = 'yes'
except AttributeError: proceed = 'no'
elif output_type == 'adjacent-exons':
if len(ed.PrevExonSeq())>1 and len(ed.NextExonSeq())>1:
try: seq = ['(prior-exon-seq)'+ed.PrevExonSeq(),'(next-exon-seq)'+ed.NextExonSeq()]; proceed = 'yes'
except AttributeError: proceed = 'no'
elif output_type == 'adjacent-introns':
if len(ed.PrevIntronSeq())>1 and len(ed.NextIntronSeq())>1:
try: seq = ['(prior-intron-seq)'+ed.PrevIntronSeq(), '(next-intron-seq)'+ed.NextIntronSeq()]; proceed = 'yes'
except AttributeError: proceed = 'no'
except AttributeError: proceed = 'no'
if proceed == 'yes':
gene_data_exported[ens_gene]=[]
probe_data_exported[probeset_id]=[]
if data_type == 'gene':
values = ['>'+ens_gene]
else: values = ['>'+ens_gene,exon_id,ens_exons,probeset_id]
values = string.join(values,'|')+'\n';data.write(values)
for seq_data in seq:
i = 0; e = 100
while i < len(seq_data):
seq_line = seq_data[i:e]; data.write(seq_line+'\n')
i+=100; e+=100
#print len(gene_data_exported), 'gene entries exported'
#print len(probe_data_exported), 'probeset entries exported'
data.close()
#print exon_seq_db_filename, 'exported....'
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
if __name__ == '__main__':
m = 'Mm'
h = 'Hs'
r = 'Rn'
Species = h
Array_type = 'junction'
exportMetaProbesets(Array_type,Species);sys.exit()
Data_type = 'probeset'
Output_types = 'promoter'
Output_types = 'all'
grabExonIntronPromoterSequences(Species,Array_type,Data_type,Output_types)
sys.exit()
#"""
avg_all_for_ss = 'yes'
import_dir = '/AltDatabase/'+Species+ '/exon'
    expr_file_dir = 'ExpressionInput/exp.HEK-confluency.plier.txt'
dagb_p = 0.001
f_cutoff = 2.297
exons_to_grab = "core"
x = 'Affymetrix'
y = 'Ensembl'
z = 'default'
data_source = y
constitutive_source = z
filename = expr_file_dir; p = dagb_p
getAnnotations(expr_file_dir,dagb_p,exons_to_grab,data_source,constitutive_source,Species)
global species; species = Species
process_from_scratch = 'no'
###Get annotations using Affymetrix as a trusted source or via links to Ensembl
if data_source == 'Affymetrix':
annotation_dbases = ExonArrayAffyRules.getAnnotations(exons_to_grab,constitutive_source,process_from_scratch)
probe_association_db,constitutive_gene_db,exon_location_db, trans_annotation_db, trans_annot_extended = annotation_dbases
else:
probeset_db,annotate_db,constitutive_gene_db,splicing_analysis_db = ExonArrayEnsemblRules.getAnnotations(process_from_scratch,constitutive_source,species,avg_all_for_ss)
filterExpressionData(filename,filtered_exon_db,constitutive_gene_db,probeset_db,data_type)
#filtered_gene_db = permformFtests(filtered_exp_db,group_count,probeset_db)
################# End of ExonArray.py (package: AltAnalyze; source path: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/ExonArray.py)
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from stats_scripts import statistics
import copy
import time
from build_scripts import ExonSeqModule
import update
dirfile = unique
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".TXT" or entry[-3:] == ".fa":
dir_list2.append(entry)
return dir_list2
def filepath(filename):
fn = unique.filepath(filename)
return fn
def eliminate_redundant_dict_values(database):
    db1={}
    for key in database:
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
    return db1
#################### Begin Analyzing Datasets ####################
def importProbesetSeqeunces(filename,exon_db,species):
print 'importing', filename
fn=filepath(filename)
probeset_seq_db={}; x = 0;count = 0
for line in open(fn,'r').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
probeset = t[0]; sequence = t[-1]; sequence = string.upper(sequence)
try:
y = exon_db[probeset]; gene = y.GeneID(); y.SetExonSeq(sequence)
try: probeset_seq_db[gene].append(y)
except KeyError: probeset_seq_db[gene] = [y]
except KeyError: null=[] ### Occurs if there is no Ensembl for the critical exon or the sequence is too short to analyze
print len(probeset_seq_db), "length of gene - probeset sequence database"
return probeset_seq_db
def importSplicingAnnotationDatabaseAndSequence(species,array_type,biotype):
array_ens_db={}
if array_type == 'AltMouse':
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl_relationships.txt'
update.verifyFile(filename,array_type) ### Will force download if missing
fn=filepath(filename); x = 0
for line in open(fn,'r').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
array_gene,ens_gene = t
try: array_ens_db[array_gene].append(ens_gene)
except KeyError: array_ens_db[array_gene]=[ens_gene]
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-junction-seq.txt'
fn=filepath(filename); probeset_seq_db={}; x = 0
for line in open(fn,'r').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
probeset,probeset_seq,junction_seq = t; junction_seq=string.replace(junction_seq,'|','')
probeset_seq_db[probeset] = probeset_seq,junction_seq
    ###Import reciprocal junctions, so we can compare these directly instead of hits to nulls, and combine with sequence data
    ###This short-cuts what we did in two functions in ExonSeqModule with exon level data
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_junction-comparisons.txt'
fn=filepath(filename); probeset_gene_seq_db={}; x = 0
for line in open(fn,'r').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
array_gene,probeset1,probeset2,critical_exons = t #; critical_exons = string.split(critical_exons,'|')
probesets = [probeset1,probeset2]
if array_type == 'junction' or array_type == 'RNASeq': array_ens_db[array_gene]=[array_gene]
if array_gene in array_ens_db:
ensembl_gene_ids = array_ens_db[array_gene]
for probeset_id in probesets:
if probeset_id in probeset_seq_db:
probeset_seq,junction_seq = probeset_seq_db[probeset_id]
if biotype == 'gene':
for ensembl_gene_id in ensembl_gene_ids:
probe_data = ExonSeqModule.JunctionDataSimple(probeset_id,ensembl_gene_id,array_gene,probesets,critical_exons)
probe_data.SetExonSeq(probeset_seq)
probe_data.SetJunctionSeq(junction_seq)
try: probeset_gene_seq_db[ensembl_gene_id].append(probe_data)
except KeyError: probeset_gene_seq_db[ensembl_gene_id] = [probe_data]
                        else: ### Used for probeset annotations downstream of sequence alignment in LinkEST, analogous to exon_db for exon analyses
probe_data = ExonSeqModule.JunctionDataSimple(probeset_id,ensembl_gene_ids,array_gene,probesets,critical_exons)
probe_data.SetExonSeq(probeset_seq)
probe_data.SetJunctionSeq(junction_seq)
probeset_gene_seq_db[probeset_id] = probe_data
print len(probeset_gene_seq_db),"genes with probeset sequence associated"
return probeset_gene_seq_db
def getParametersAndExecute(probeset_seq_file,array_type,species,data_type):
if data_type == 'critical-exons':
if array_type == 'RNASeq': probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_exons.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
###Import probe-level associations
exon_db = ExonSeqModule.importSplicingAnnotationDatabase(probeset_annotations_file,array_type)
start_time = time.time()
probeset_seq_db = importProbesetSeqeunces(probeset_seq_file,exon_db,species) ###Do this locally with a function that works on tab-delimited as opposed to fasta sequences (exon array)
end_time = time.time(); time_diff = int(end_time-start_time)
elif data_type == 'junctions':
start_time = time.time(); biotype = 'gene' ### Indicates whether to store information at the level of genes or probesets
probeset_seq_db = importSplicingAnnotationDatabaseAndSequence(species,array_type,biotype)
end_time = time.time(); time_diff = int(end_time-start_time)
print "Analyses finished in %d seconds" % time_diff
return probeset_seq_db
def runProgram(Species,Array_type,mir_source,stringency,Force):
global species; global array_type; global force
process_microRNA_predictions = 'yes'
species = Species; array_type = Array_type; force = Force
import_dir = '/AltDatabase/'+species+'/'+array_type
filedir = import_dir[1:]+'/'
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
probeset_seq_file=''
    for input_file in dir_list: #loop through each file in the directory to process results
if 'critical-exon-seq_updated' in input_file: probeset_seq_file = filedir+input_file
elif 'critical-exon-seq' in input_file: probeset_seq_file2 = filedir+input_file
if len(probeset_seq_file)==0: probeset_seq_file=probeset_seq_file2
data_type = 'critical-exons'
try: splice_event_db = getParametersAndExecute(probeset_seq_file,array_type,species,data_type)
except UnboundLocalError:
probeset_seq_file = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-exon-seq_updated.txt'
update.downloadCurrentVersion(probeset_seq_file,array_type,'txt')
splice_event_db = getParametersAndExecute(probeset_seq_file,array_type,species,data_type)
if process_microRNA_predictions == 'yes':
print 'stringency:',stringency
try:
ensembl_mirna_db = ExonSeqModule.importmiRNATargetPredictionsAdvanced(species)
ExonSeqModule.alignmiRNAData(array_type,mir_source,species,stringency,ensembl_mirna_db,splice_event_db)
except Exception: pass
if __name__ == '__main__':
species = 'Mm'; array_type = 'junction'
process_microRNA_predictions = 'yes'
mir_source = 'multiple'
force = 'yes'
runProgram(species,array_type,mir_source,'lax',force)
runProgram(species,array_type,mir_source,'strict',force)
################# End of JunctionSeqModule.py (package: AltAnalyze; source path: /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/JunctionSeqModule.py)
###IdentifyAltIsoforms
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import export
import time
import copy
from build_scripts import FeatureAlignment; reload(FeatureAlignment)
from Bio import Entrez
from build_scripts import ExonAnalyze_module
from build_scripts import mRNASeqAlign
import traceback
import requests
requests.packages.urllib3.disable_warnings()
import ssl
try: _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if (entry[-4:] == ".txt"or entry[-4:] == ".tab" or entry[-4:] == ".csv" or '.fa' in entry) and '.gz' not in entry and '.zip' not in entry: dir_list2.append(entry)
return dir_list2
class GrabFiles:
def setdirectory(self,value):
self.data = value
def display(self):
print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
file_dirs = getDirectoryFiles(self.data,str(search_term))
if len(file_dirs)<1: print search_term,'not found',self.data
return file_dirs
def getDirectoryFiles(import_dir, search_term):
exact_file = ''; exact_file_dirs=[]
try: dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
except Exception:
print unique.filepath(import_dir)
export.createDirPath(unique.filepath(import_dir[1:]))
dir_list = read_directory(import_dir)
dir_list.sort() ### Get the latest files
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = import_dir[1:]+'/'+data
if search_term in affy_data_dir: exact_file_dirs.append(affy_data_dir)
return exact_file_dirs
########## End generic file import ##########
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def importSplicingAnnotationDatabase(array_type,species,import_coordinates):
if array_type == 'exon' or array_type == 'gene': filename = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_probesets.txt"
elif array_type == 'junction': filename = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
elif array_type == 'RNASeq': filename = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_exons.txt"
fn=filepath(filename); x=0; probeset_gene_db={}; probeset_coordinate_db={}
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
t=string.split(probeset_data,'\t'); probeset=t[0]; exon_id=t[1]; ens_gene=t[2]; start=int(t[6]); stop=int(t[7])
if array_type != 'RNASeq':
if ':' in probeset: probeset = string.split(probeset,':')[1]
start_stop = [start,stop]; start_stop.sort(); start,stop = start_stop; proceed = 'yes'
#if probeset[-2] != '|': ### These are the 5' and 3' exons for a splice-junction (don't need to analyze)
if test == 'yes':
if ens_gene not in test_genes: proceed = 'no'
if proceed == 'yes':
probeset_gene_db[probeset]=ens_gene,exon_id
if import_coordinates == 'yes': probeset_coordinate_db[probeset] = start,stop
print 'Probeset to Ensembl gene data imported'
if import_coordinates == 'yes': return probeset_gene_db,probeset_coordinate_db
else: return probeset_gene_db
def importAltMouseJunctionDatabase(species,array_type):
### Import AffyGene to Ensembl associations (e.g., AltMouse array)
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl.txt'
fn=filepath(filename); array_ens_db={}; x = 0
for line in open(fn,'r').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
array_gene,ens_gene = t
array_ens_db[array_gene]=ens_gene
#try: array_ens_db[array_gene].append(ens_gene)
#except KeyError: array_ens_db[array_gene]=[ens_gene]
filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_junction-comparisons.txt'
fn=filepath(filename); probeset_gene_db={}; x = 0
for line in open(fn,'r').xreadlines():
data, newline = string.split(line,'\n'); t = string.split(data,'\t')
if x==0: x=1
else:
array_gene,probeset1,probeset2,critical_exons = t #; critical_exons = string.split(critical_exons,'|')
if array_gene in array_ens_db:
ensembl_gene_ids = array_ens_db[array_gene]
else: ensembl_gene_ids=[]
probeset_gene_db[probeset1+'|'+probeset2] = array_gene,critical_exons
probeset_gene_db[probeset1] = array_gene,critical_exons
probeset_gene_db[probeset2] = array_gene,critical_exons
return probeset_gene_db
def importJunctionDatabase(species,array_type):
if array_type == 'junction': filename = 'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
else: filename = 'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps.txt'
fn=filepath(filename); probeset_gene_db={}; x=0
for line in open(fn,'rU').xreadlines():
if x==0: x=1
else:
data = cleanUpLine(line)
gene,critical_exons,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source = string.split(data,'\t')
probeset_gene_db[incl_junction_probeset+'|'+excl_junction_probeset] = gene,critical_exons
probeset_gene_db[excl_junction_probeset] = gene,critical_exons
probeset_gene_db[incl_junction_probeset] = gene,critical_exons
return probeset_gene_db
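### Key-layout sketch (hypothetical IDs): each junction comparison is indexed
### three ways so individual probesets and the inclusion|exclusion pair all
### resolve to the same gene and critical-exon annotation.
def _exampleJunctionKeys():
    db = {}; gene = 'ENSG00000000001'; critical_exons = 'E2.1'
    incl = 'incl_probeset'; excl = 'excl_probeset'
    db[incl+'|'+excl] = gene,critical_exons
    db[excl] = gene,critical_exons
    db[incl] = gene,critical_exons
    return db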
def importEnsExonStructureDataSimple(filename,species,ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db,filter_transcripts):
fn=filepath(filename); x=0
print fn
"""
if 'Ensembl' not in filename:
original_ensembl_transcripts={} ### Keep track of the original ensembl transcripts
for i in ens_transcript_exon_db: original_ensembl_transcripts[i]=[]"""
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, chr, strand, exon_start, exon_end, ens_exonid, constitutive_exon, ens_transcriptid = t
exon_start = int(exon_start); exon_end = int(exon_end); proceed = 'yes'
if test == 'yes':
if gene not in test_genes: proceed = 'no'
if (ens_transcriptid in filter_transcripts or len(filter_transcripts)==0) and proceed == 'yes':
### Only import transcripts that are in the set to analyze
                try: ens_transcript_exon_db[ens_transcriptid][exon_start,exon_end]=[]
                except KeyError: ens_transcript_exon_db[ens_transcriptid] = {(exon_start,exon_end):[]}
                try: ens_gene_transcript_db[gene][ens_transcriptid]=[]
                except KeyError: ens_gene_transcript_db[gene] = {ens_transcriptid:[]}
                try: ens_gene_exon_db[gene][exon_start,exon_end]=[]
                except KeyError: ens_gene_exon_db[gene] = {(exon_start,exon_end):[]}
"""
### Some transcripts will have the same coordinates - get rid of these (not implemented yet)
if 'Ensembl' not in filename:
for gene in ens_gene_transcript_db:
exon_structure_db={}
for transcript in ens_gene_transcript_db[gene]:
ls=[]
for coord in ens_transcript_exon_db[transcript]: ls.append(coord)
ls.sort()
try: exon_structure_db[tuple(ls)].append(transcript)
except Exception: exon_structure_db[tuple(ls)] = [transcript]"""
#ens_gene_transcript_db = eliminateRedundant(ens_gene_transcript_db)
#ens_gene_exon_db = eliminateRedundant(ens_gene_exon_db)
#if 'ENSG00000240173' in ens_gene_exon_db: print 'here'
print 'Exon/transcript data imported for %s transcripts' % len(ens_transcript_exon_db), len(ens_gene_exon_db)
return ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db
def eliminateRedundant(database):
    for key in database:
        try:
            values = makeUnique(database[key])
            values.sort()
        except Exception: values = unique.unique(database[key])
        database[key] = values
    return database
def makeUnique(item):
db1={}; list1=[]
for i in item: db1[i]=[]
for i in db1: list1.append(i)
list1.sort()
return list1
def getProbesetExonCoordinates(probeset_coordinate_db,probeset_gene_db,ens_gene_exon_db):
probeset_exon_coor_db={}
for probeset in probeset_gene_db:
gene = probeset_gene_db[probeset][0]
start,stop = probeset_coordinate_db[probeset]
coor_list = ens_gene_exon_db[gene]
proceed = 'yes'
if test == 'yes':
if gene not in test_genes: proceed = 'no'
if proceed == 'yes':
for (t_start,t_stop) in coor_list:
if ((start >= t_start) and (start <= t_stop)) and ((stop >= t_start) and (stop <= t_stop)):
### Thus the probeset is completely in this exon
try: probeset_exon_coor_db[probeset].append((t_start,t_stop))
except KeyError: probeset_exon_coor_db[probeset]=[(t_start,t_stop)]
#if '20.8' in probeset: print (t_start,t_stop),start,stop
#sys.exit()
return probeset_exon_coor_db
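### Containment sketch (toy coordinates): a probeset is assigned to an exon
### only when both its start and stop fall within the exon boundaries, as
### tested above.
def _exampleContainment():
    start,stop = 105,140 ### hypothetical probeset coordinates
    t_start,t_stop = 100,200 ### hypothetical exon coordinates
    return ((start >= t_start) and (start <= t_stop)) and ((stop >= t_start) and (stop <= t_stop)) ### -> True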
def compareExonComposition(species,array_type):
probeset_gene_db,probeset_coordinate_db = importSplicingAnnotationDatabase(array_type, species,'yes')
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db = importEnsExonStructureDataSimple(filename,species,{},{},{},{})
### Add UCSC transcript data to ens_transcript_exon_db and ens_gene_transcript_db
try:
        filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_COMPLETE-mrna.txt' ### Use the non-filtered database to properly analyze exon composition
ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db = importEnsExonStructureDataSimple(filename,species,ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db,{})
except Exception:pass
### Derive probeset to exon associations De Novo
probeset_exon_coor_db = getProbesetExonCoordinates(probeset_coordinate_db,probeset_gene_db,ens_gene_exon_db)
if (array_type == 'junction' or array_type == 'RNASeq') and data_type == 'exon':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+species+'_all-transcript-matches.txt'
else:
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_all-transcript-matches.txt'
data = export.ExportFile(export_file)
    ### Identifying isoforms containing and not containing the probeset
probeset_transcript_db={}; match_pairs_missing=0; valid_transcript_pairs={}; ok_transcript_pairs={}; probesets_not_found=0
    ### Debug counts for the imported structures
    print len(probeset_exon_coor_db)
    print len(ens_transcript_exon_db)
    print len(ens_gene_transcript_db)
    print len(ens_gene_exon_db)
for probeset in probeset_exon_coor_db:
geneid = probeset_gene_db[probeset][0]
transcripts = ens_gene_transcript_db[geneid]
matching_transcripts=[]; not_matching_transcripts=[]; matching={}; not_matching={}
for coordinates in probeset_exon_coor_db[probeset]: ### Multiple exons may align
for transcript in transcripts:
### Build a cursory list of matching and non-matching transcripts
if coordinates in ens_transcript_exon_db[transcript]:
matching_transcripts.append(transcript)
else:
not_matching_transcripts.append(transcript)
        ### Filter large non-matching transcript sets to facilitate processing
not_matching_transcripts = mRNASeqAlign.filterNullMatch(not_matching_transcripts,matching_transcripts)
### Re-analyze the filtered list for exon content
transcripts = unique.unique(not_matching_transcripts+matching_transcripts)
matching_transcripts=[]; not_matching_transcripts=[]
for coordinates in probeset_exon_coor_db[probeset]: ### Multiple exons may align
for transcript in transcripts:
if coordinates in ens_transcript_exon_db[transcript]:
matching_transcripts.append(transcript)
other_coordinate_list=[]
for other_coordinates in ens_transcript_exon_db[transcript]:
                        if coordinates != other_coordinates: ### Add exon coordinates for all exons that do NOT align to the probeset
other_coordinate_list.append(other_coordinates)
if len(other_coordinate_list)>1:
other_coordinate_list.sort()
### Instead of replacing the values in place, we need to do this (otherwise the original object will be modified)
other_coordinate_list = [[0,other_coordinate_list[0][1]]]+other_coordinate_list[1:-1]+[[other_coordinate_list[-1][0],0]]
#other_coordinate_list[0][0] = 0; other_coordinate_list[-1][-1] = 0
other_coordinate_list = convertListsToTuple(other_coordinate_list)
matching[tuple(other_coordinate_list)]=transcript
else:
not_matching_transcripts.append(transcript)
other_coordinate_list=[]
for other_coordinates in ens_transcript_exon_db[transcript]:
                        if coordinates != other_coordinates: ### Add exon coordinates for all exons that do NOT align to the probeset
other_coordinate_list.append(other_coordinates)
if len(other_coordinate_list)>1:
other_coordinate_list.sort()
other_coordinate_list = [[0,other_coordinate_list[0][1]]]+other_coordinate_list[1:-1]+[[other_coordinate_list[-1][0],0]]
other_coordinate_list = convertListsToTuple(other_coordinate_list)
not_matching[tuple(other_coordinate_list)]=transcript
#print '\n',len(matching_transcripts), len(not_matching_transcripts);kill
        ### A transcript can't appear in both the matching and non-matching sets
not_matching_transcripts2=[]; not_matching2={}
for transcript in not_matching_transcripts:
if transcript not in matching_transcripts: not_matching_transcripts2.append(transcript)
for coord in not_matching:
transcript = not_matching[coord]
if transcript in not_matching_transcripts2: not_matching2[coord] = not_matching[coord]
not_matching = not_matching2; not_matching_transcripts = not_matching_transcripts2
#if probeset == '3431530': print '3431530a', matching_transcripts,not_matching_transcripts
if len(matching)>0 and len(not_matching)>0:
perfect_match_found='no'; exon_match_data=[] ### List is only used if more than a single cassette exon difference between transcripts
for exon_list in matching:
matching_transcript = matching[exon_list]
if exon_list in not_matching:
not_matching_transcript = not_matching[exon_list]
valid_transcript_pairs[probeset] = matching_transcript, not_matching_transcript
perfect_match_found = 'yes'
#print probeset,matching_transcript, not_matching_transcript
#print ens_transcript_exon_db[matching_transcript],'\n'
#print ens_transcript_exon_db[not_matching_transcript]; kill
else:
unique_exon_count_db={} ### Determine how many exons are common and which are transcript distinct for a pair-wise comp
for exon_coor in exon_list:
try: unique_exon_count_db[exon_coor] +=1
except KeyError: unique_exon_count_db[exon_coor] =1
for exon_list in not_matching:
not_matching_transcript = not_matching[exon_list]
for exon_coor in exon_list:
try: unique_exon_count_db[exon_coor] +=1
except KeyError: unique_exon_count_db[exon_coor] =1
exon_count_db={}
for exon_coor in unique_exon_count_db:
num_trans_present = unique_exon_count_db[exon_coor]
try: exon_count_db[num_trans_present]+=1
except KeyError: exon_count_db[num_trans_present]=1
try:
exon_count_results = [exon_count_db[1],-1*(exon_count_db[2]),matching_transcript,not_matching_transcript]
exon_match_data.append(exon_count_results)
except KeyError:
null =[] ###Occurs if no exons are found in common (2)
#if probeset == '3431530': print '3431530b', exon_count_results
if perfect_match_found == 'no' and len(exon_match_data)>0:
exon_match_data.sort()
matching_transcript = exon_match_data[0][-2]
not_matching_transcript = exon_match_data[0][-1]
ok_transcript_pairs[probeset] = matching_transcript, not_matching_transcript
#if probeset == '3431530': print '3431530', matching_transcript, not_matching_transcript
else: match_pairs_missing+=1
###Export transcript comparison sets to an external file for different analyses
matching_transcripts = unique.unique(matching_transcripts)
not_matching_transcripts = unique.unique(not_matching_transcripts)
matching_transcripts=string.join(matching_transcripts,'|')
not_matching_transcripts=string.join(not_matching_transcripts,'|')
values = string.join([probeset,matching_transcripts,not_matching_transcripts],'\t')+'\n'
data.write(values)
    print match_pairs_missing,'probesets missing either an aligning or non-aligning transcript'
print len(valid_transcript_pairs),'probesets with a single exon difference aligning to two isoforms'
print len(ok_transcript_pairs),'probesets with more than one exon difference aligning to two isoforms'
data.close()
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+species+'_top-transcript-matches.txt'
else:
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_top-transcript-matches.txt'
export_data = export.ExportFile(export_file)
for probeset in valid_transcript_pairs:
matching_transcript,not_matching_transcript = valid_transcript_pairs[probeset]
values = string.join([probeset,matching_transcript,not_matching_transcript],'\t')+'\n'
export_data.write(values)
for probeset in ok_transcript_pairs:
matching_transcript,not_matching_transcript = ok_transcript_pairs[probeset]
values = string.join([probeset,matching_transcript,not_matching_transcript],'\t')+'\n'
export_data.write(values)
#if probeset == '3431530': print '3431530d', matching_transcript,not_matching_transcript
export_data.close()
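### Illustrative sketch (not executed) of the coordinate masking performed in
### compareExonComposition: the first exon's start and the last exon's stop are
### zeroed out so that isoforms differing only in their terminal exon boundaries
### still produce identical comparison keys. A new list is built rather than
### mutating the original tuples in place. Coordinates are hypothetical.
"""
coords = [(10, 20), (30, 40), (50, 60)] # sorted exon coordinates
masked = [[0, coords[0][1]]] + coords[1:-1] + [[coords[-1][0], 0]]
masked = convertListsToTuple(masked) # helper defined later in this module
print masked # [(0, 20), (30, 40), (50, 0)]
"""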
def compareExonCompositionJunctionArray(species,array_type):
print 'Finding optimal isoform matches for splicing events.'
    ###Import sequence-aligned transcript associations for individual probesets or probeset-pairs. Pairs are provided for match-match comparisons
probeset_transcript_db,unique_ens_transcripts,unique_transcripts,all_transcripts = importProbesetTranscriptMatches(species,array_type,'yes')
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db = importEnsExonStructureDataSimple(filename,species,{},{},{},all_transcripts)
### Add UCSC transcript data to ens_transcript_exon_db and ens_gene_transcript_db
try:
        filename = 'AltDatabase/ucsc/'+species+'/'+species+'_UCSC_transcript_structure_COMPLETE-mrna.txt' ### Use the non-filtered database to properly analyze exon composition
ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db = importEnsExonStructureDataSimple(filename,species,ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db,all_transcripts)
except Exception: pass
""" Used to prioritize isoform pairs for further analysis. This file is re-written as AltAnalyze
builds it's database, hence, protein coding potential information accumulates as it runs."""
seq_files, protein_coding_isoforms = importProteinSequences(species,just_get_ids=False,just_get_length=True)
def isoformPrioritization(exon_match_data):
""" Prioritize based on the composition and whether the isoform is an Ensembl protein coding isoform.
Introduced 11/26/2017(2.1.1)."""
exon_match_data.sort(); exon_match_data.reverse()
coding_match=[]
ens_match=[]
alt_match=[]
for (unique_count,common_count,match,non) in exon_match_data:
### First pair is the best match
alt_match.append([match,non])
if 'ENS' in match and 'ENS' in non:
try: ens_match_count = protein_coding_isoforms[match]
except: ens_match_count = 10000
try: ens_non_count = protein_coding_isoforms[non]
except: ens_non_count = -10000
coding_diff = abs(ens_match_count-ens_non_count)
count_ratio = (coding_diff*1.00)/max(ens_match_count,ens_non_count)
ens_match.append([count_ratio,coding_diff,ens_match_count,match,non])
if match in protein_coding_isoforms and non in protein_coding_isoforms:
### Rank by AA differing
match_count = protein_coding_isoforms[match]
non_count = protein_coding_isoforms[non]
coding_diff = abs(match_count-non_count)
count_ratio = (coding_diff*1.00)/max(match_count,non_count)
coding_match.append([count_ratio,coding_diff,match_count,match,non])
coding_match.sort(); ens_match.sort()
if len(coding_match)>0: ### Prioritize minimal coding differences
count_ratio,diff,count,final_match,final_non = coding_match[0]
elif len(ens_match)>0:
count_ratio,diff,count,final_match,final_non = ens_match[0]
else:
final_match,final_non = alt_match[0]
"""
if len(coding_match)>0 and len(ens_match)>0:
if len(coding_match)!= len(ens_match):
print coding_match
print ens_match
print alt_match
if 'ENS' not in final_match:
print final_match,final_non
sys.exit()"""
return final_match,final_non
print len(probeset_transcript_db), "probesets with multiple pairs of matching-matching or matching-null transcripts."
### Identifying isoforms containing and not containing the probeset
global transcripts_not_found; marker = 5000; increment = 5000
match_pairs_missing=0; ok_transcript_pairs={}; transcripts_not_found={}
for probesets in probeset_transcript_db:
exon_match_data=[]; start_time = time.time()
### Examine all valid matches and non-matches
match_transcripts,not_match_transcripts = probeset_transcript_db[probesets]
match_transcripts = unique.unique(match_transcripts)
not_match_transcripts = unique.unique(not_match_transcripts)
for matching_transcript in match_transcripts:
if matching_transcript in ens_transcript_exon_db: ### If in the Ensembl or UCSC transcript database
transcript_match_coord = ens_transcript_exon_db[matching_transcript] ### Get the coordinates
for not_matching_transcript in not_match_transcripts:
if not_matching_transcript in ens_transcript_exon_db:
transcript_null_coord = ens_transcript_exon_db[not_matching_transcript]
unique_exon_count_db={} ### Determine how many exons are common and which are transcript distinct for a pair-wise comp
for exon_coor in transcript_match_coord:
try: unique_exon_count_db[tuple(exon_coor)] +=1
except KeyError: unique_exon_count_db[tuple(exon_coor)] =1
for exon_coor in transcript_null_coord:
try: unique_exon_count_db[tuple(exon_coor)] +=1
except KeyError: unique_exon_count_db[tuple(exon_coor)] =1
exon_count_db={}
for exon_coor in unique_exon_count_db:
num_trans_present = unique_exon_count_db[exon_coor]
try: exon_count_db[num_trans_present]+=1
except KeyError: exon_count_db[num_trans_present]=1
""" 11/26/2017(2.1.1): The below required that for two isoforms (matching/non-matching to an event),
that at least one exon was unique to a transcript and that at least one exon was in common to both.
This is not always the case, for example with complete intron retention, the coordinates in the
intron retained transcript will be distinct from the non-retained. The requirement was eleminated."""
try: iso_unique_exon_count = exon_count_db[1]
except: iso_unique_exon_count = 0
try: iso_common_exon_count = exon_count_db[2]
except: iso_common_exon_count = 0
exon_count_results = [iso_unique_exon_count,-1*iso_common_exon_count,matching_transcript,not_matching_transcript]
exon_match_data.append(exon_count_results)
#if probeset == '3431530': print '3431530b', exon_count_results
else:
### Should rarely occur since accession would be missing from the databases
transcripts_not_found[matching_transcript]=[]
""" Compare each isoform matching-nonmatching pair in terms of composition """
try:
matching_transcript,not_matching_transcript = isoformPrioritization(exon_match_data)
ok_transcript_pairs[probesets] = matching_transcript, not_matching_transcript
except Exception: pass
end_time = time.time()
if len(ok_transcript_pairs) == marker:
marker+= increment; print '*',
    print match_pairs_missing,'junctions missing either an aligning or non-aligning transcript'
print len(transcripts_not_found),'transcripts not found'
#print len(ok_transcript_pairs),'probesets with more than one exon difference aligning to two isoforms'
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+species+'_top-transcript-matches.txt'
else: export_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_top-transcript-matches.txt'
export_data = export.ExportFile(export_file)
for probeset in ok_transcript_pairs:
matching_transcript,not_matching_transcript = ok_transcript_pairs[probeset]
values = string.join([probeset,matching_transcript,not_matching_transcript],'\t')+'\n'
export_data.write(values)
#if probeset == '3431530': print '3431530d', matching_transcript,not_matching_transcript
export_data.close()
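### Illustrative sketch (not executed) of the pairwise exon bookkeeping used
### above: exons observed once across a pair of isoforms are isoform-specific,
### while exons observed twice are shared. The ranking key pairs the count of
### isoform-specific exons with the negated shared count, so ties on the first
### value break toward pairs with more shared exons. Coordinates are hypothetical.
"""
match_coords = [(10, 20), (30, 40)]
null_coords = [(10, 20), (50, 60)]
counts = {}
for coord in match_coords + null_coords:
    counts[coord] = counts.get(coord, 0) + 1
iso_unique = len([c for c in counts if counts[c] == 1]) # (30,40) and (50,60)
iso_common = len([c for c in counts if counts[c] == 2]) # (10,20)
print [iso_unique, -1 * iso_common] # [2, -1] - the ranking key
"""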
def compareProteinComposition(species,array_type,translate,compare_all_features):
""" Objectives 11/26/2017(2.1.1):
1) Import ALL isoform matches for each junction or reciprocal junction pair
2) Obtain protein translations for ALL imported isoforms
3) With the protein isoform information, compare exon composition and protein coding length to pick two representative isoforms
4) Find protein compositional differences between these pairs
This is a modification of the previous workflow which returned fewer hits that were likely less carefully selected."""
probeset_transcript_db,unique_ens_transcripts,unique_transcripts,all_transcripts = importProbesetTranscriptMatches(species,array_type,'yes')
all_transcripts=[]
"""if translate == 'yes': ### Used if we want to re-derive all transcript-protein sequences - set to yes1 when we want to disable this option
transcript_protein_seq_db = translateRNAs(unique_transcripts,unique_ens_transcripts,'fetch')
else: """
    ### Translate all mRNA sequences to proteins where possible
transcript_protein_seq_db = translateRNAs(unique_transcripts,unique_ens_transcripts,'fetch_new')
### Find the best isoform pairs for each splicing event
compareExonCompositionJunctionArray(species,array_type)
### Re-import isoform associations for the top events exported from the above function (top versus all isoforms)
probeset_transcript_db,unique_ens_transcripts,unique_transcripts,all_transcripts = importProbesetTranscriptMatches(species,array_type,'no')
probeset_protein_db,protein_seq_db = convertTranscriptToProteinAssociations(probeset_transcript_db,transcript_protein_seq_db,'no')
transcript_protein_seq_db=[]
global protein_ft_db
if array_type == 'exon' or array_type == 'gene' or data_type == 'exon':
probeset_gene_db = importSplicingAnnotationDatabase(array_type,species,'no')
elif (array_type == 'junction' or array_type == 'RNASeq'):
probeset_gene_db = importJunctionDatabase(species,array_type)
elif array_type == 'AltMouse':
probeset_gene_db = importAltMouseJunctionDatabase(species,array_type)
genes_being_analyzed={} ### Need to tell 'grab_exon_level_feature_calls' what genes to analyze
for probeset in probeset_gene_db:
if probeset in probeset_protein_db: ### Filter for genes that have probesets with matching and non-matching proteins
gene = probeset_gene_db[probeset][0]; genes_being_analyzed[gene]=[gene]
protein_ft_db,domain_gene_counts = FeatureAlignment.grab_exon_level_feature_calls(species,array_type,genes_being_analyzed)
if compare_all_features == 'yes': ### Used when comparing all possible PROTEIN pairs to find minimal domain changes
compareProteinFeaturesForPairwiseComps(probeset_protein_db,protein_seq_db,probeset_gene_db,species,array_type)
ExonAnalyze_module.identifyAltIsoformsProteinComp(probeset_gene_db,species,array_type,protein_ft_db,compare_all_features,data_type)
def remoteTranslateRNAs(Species,unique_transcripts,unique_ens_transcripts,analysis_type):
global species
species = Species
translateRNAs(unique_transcripts,unique_ens_transcripts,analysis_type)
def translateRNAs(unique_transcripts,unique_ens_transcripts,analysis_type):
if analysis_type == 'local':
### Get protein ACs for UCSC transcripts if provided by UCSC (NOT CURRENTLY USED BY THE PROGRAM!!!!)
mRNA_protein_db,missing_protein_ACs_UCSC = importUCSCSequenceAssociations(species,unique_transcripts)
### For missing_protein_ACs, check to see if they are in UniProt. If so, export the protein sequence
try: missing_protein_ACs_UniProt = importUniProtSeqeunces(species,mRNA_protein_db,missing_protein_ACs_UCSC)
except Exception: null=[]
### For transcripts with protein ACs, see if we can find sequence from NCBI
#missing_protein_ACs_NCBI = importNCBIProteinSeq(mRNA_protein_db)
### Combine missing_protein_ACs_NCBI and missing_protein_ACs_UniProt
#for mRNA_AC in missing_protein_ACs_NCBI: missing_protein_ACs_UniProt[mRNA_AC] = missing_protein_ACs_NCBI[mRNA_AC]
### Import mRNA sequences for mRNA ACs with no associated protein sequence and submit for in silico translation
        missing_protein_ACs_inSilico = importUCSCSequences(missing_protein_ACs_UniProt) ### the NCBI import above is disabled, so only the UniProt-missing set is submitted
else:
try: missing_protein_ACs_UniProt = importUniProtSeqeunces(species,{},{})
except Exception, e:
print e
null=[]
    ### Export Ensembl protein sequences for matching isoforms and identify transcripts without protein sequence
ensembl_protein_seq_db, missing_protein_ACs_Ensembl = importEnsemblProteinSeqData(species,unique_ens_transcripts)
### Import Ensembl mRNA sequences for mRNA ACs with no associated protein sequence and submit for in silico translation
missing_Ens_protein_ACs_inSilico = importEnsemblTranscriptSequence(missing_protein_ACs_Ensembl)
if analysis_type == 'fetch':
ac_list = []
for ac in unique_transcripts: ac_list.append(ac)
        try: ### Remove the second output file, which is not immediately over-written, to avoid reading a stale copy before it is regenerated
output_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-Protein_sequences_'+str(2)+'.txt'
fn=filepath(output_file); os.remove(fn)
except Exception: null=[]
try: fetchSeq(ac_list,'nucleotide',1)
except Exception:
print traceback.format_exc(),'\n'
print 'WARNING!!!!! NCBI webservice connectivity failed due to the above error!!!!!\n'
###Import protein sequences
seq_files, transcript_protein_db = importProteinSequences(species,just_get_ids=True)
print len(unique_ens_transcripts)+len(unique_transcripts), 'examined transcripts.'
print len(transcript_protein_db),'transcripts with associated protein sequence.'
missing_protein_data=[]
#transcript_protein_db={} ### Use to override existing mRNA-protein annotation files
for ac in unique_transcripts:
if ac not in transcript_protein_db: missing_protein_data.append(ac)
###Search NCBI using the esearch not efetch function to get valid GIs (some ACs make efetch crash)
try: missing_gi_list = searchEntrez(missing_protein_data,'nucleotide')
except Exception,e:
print 'Exception encountered:',e
try: missing_gi_list = searchEntrez(missing_protein_data,'nucleotide')
except Exception:
print 'Exception encountered:',e
try: missing_gi_list = searchEntrez(missing_protein_data,'nucleotide')
except Exception:
print traceback.format_exc(),'\n'
print 'WARNING!!!!! NCBI webservice connectivity failed due to the above error!!!!!\n'
try: fetchSeq(missing_gi_list,'nucleotide',len(seq_files)-2)
except Exception:
print traceback.format_exc(),'\n'
print 'WARNING!!!!! NCBI webservice connectivity failed due to the above error!!!!!\n'
seq_files, transcript_protein_seq_db = importProteinSequences(species)
print len(unique_ens_transcripts)+len(unique_transcripts), 'examined transcripts.'
print len(transcript_protein_seq_db),'transcripts with associated protein sequence.'
return transcript_protein_seq_db
def convertTranscriptToProteinAssociations(probeset_transcript_db,transcript_protein_seq_db,compare_all_features):
### Convert probeset-transcript match db to probeset-protein match db
probeset_protein_db={}; compared_protein_ids={}
for probeset in probeset_transcript_db:
match_transcripts,not_match_transcripts = probeset_transcript_db[probeset]
match_proteins=[]; not_match_proteins=[]
for transcript in match_transcripts:
if transcript in transcript_protein_seq_db:
protein_id = transcript_protein_seq_db[transcript][0]; match_proteins.append(protein_id); compared_protein_ids[protein_id]=[]
for transcript in not_match_transcripts:
if transcript in transcript_protein_seq_db:
protein_id = transcript_protein_seq_db[transcript][0]; not_match_proteins.append(protein_id); compared_protein_ids[protein_id]=[]
if len(match_proteins)>0 and len(not_match_proteins)>0:
probeset_protein_db[probeset]=[match_proteins,not_match_proteins]
protein_seq_db={}
for transcript in transcript_protein_seq_db:
protein_id, protein_seq = transcript_protein_seq_db[transcript]
if protein_id in compared_protein_ids: protein_seq_db[protein_id] = [protein_seq]
transcript_protein_seq_db=[]
if compare_all_features == 'no': ### If yes, all pairwise comps need to still be examined
title_row = 'Probeset\tAligned protein_id\tNon-aligned protein_id'
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-protein-dbase_exoncomp.txt'
else: export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-dbase_exoncomp.txt'
exportSimple(probeset_protein_db,export_file,title_row)
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/SEQUENCE-protein-dbase_exoncomp.txt'
else: export_file = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_exoncomp.txt'
exportSimple(protein_seq_db,export_file,'')
return probeset_protein_db,protein_seq_db
def importProteinSequences(species,just_get_ids=False,just_get_length=False):
transcript_protein_seq_db={}
    import_dir = '/AltDatabase/'+species+'/SequenceData/output/sequences' ### Multi-species file
g = GrabFiles(); g.setdirectory(import_dir)
seq_files = g.searchdirectory('Transcript-')
for seq_file in seq_files:
fn=filepath(seq_file)
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
mRNA_AC,protein_AC,protein_seq = string.split(probeset_data,'\t')
if just_get_ids or just_get_length:
if just_get_length:
transcript_protein_seq_db[mRNA_AC]=len(protein_seq)
else:
transcript_protein_seq_db[mRNA_AC]=protein_AC
else: transcript_protein_seq_db[mRNA_AC] = protein_AC,protein_seq
return seq_files, transcript_protein_seq_db
def importCDScoordinates(species):
""" Read in the CDS locations for Ensembl proteins, NCBI proteins and in silico derived proteins and then export to a single file """
cds_location_db={}
errors = {}
import_dir = '/AltDatabase/'+species+'/SequenceData/output/details'
import_dir2 = '/AltDatabase/ensembl/'+species
g = GrabFiles(); g.setdirectory(import_dir)
g2 = GrabFiles(); g2.setdirectory(import_dir2)
seq_files = g.searchdirectory('Transcript-')
seq_files += g2.searchdirectory('EnsemblTranscriptCDSPositions')
output_file = 'AltDatabase/ensembl/'+species +'/AllTranscriptCDSPositions.txt'
dataw = export.ExportFile(output_file)
for seq_file in seq_files:
fn=filepath(seq_file)
for line in open(fn,'rU').xreadlines():
line_data = cleanUpLine(line) #remove endline
line_data = string.replace(line_data,'>','') ### occurs for some weird entries
line_data = string.replace(line_data,'<','') ### occurs for some weird entries
if 'join' in line_data:
                ### Occurs when the cds entry looks like this: AL137661 - join(203..1267,1267..2187) or 'AL137661\tjoin(203\t1267,1267\t2187)'
t = string.split(line_data,'\t')
mRNA_AC = t[0]; start = t[1][5:]; stop = t[-1][:-1]
else:
line_data = string.replace(line_data,'complement','') ### occurs for some weird entries
line_data = string.replace(line_data,')','') ### occurs for some weird entries
line_data = string.replace(line_data,'(','') ### occurs for some weird entries
try:
mRNA_AC,start,stop = string.split(line_data,'\t')
try:
cds_location_db[mRNA_AC] = int(start),int(stop)
dataw.write(string.join([mRNA_AC,start,stop],'\t')+'\n')
except Exception:
errors[line_data]=[]
#print line_data;sys.exit()
except Exception:
errors[line_data]=[]
#print line_data;sys.exit()
print len(errors),'errors...out of',len(cds_location_db)
dataw.close()
return cds_location_db
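### Illustrative sketch (not executed) of how a GenBank-style 'join' CDS record
### is parsed above: after cleanUpLine, the coordinates inside join(...) arrive
### tab-delimited, so the overall start is sliced from the first field and the
### overall stop from the last. The record below is hypothetical.
"""
import string
line_data = 'AL137661\tjoin(203\t1267,1267\t2187)'
t = string.split(line_data, '\t')
mRNA_AC = t[0]    # 'AL137661'
start = t[1][5:]  # strip the leading 'join(' -> '203'
stop = t[-1][:-1] # strip the trailing ')' -> '2187'
print mRNA_AC, start, stop
"""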
def importProbesetTranscriptMatches(species,array_type,compare_all_features):
if compare_all_features == 'yes': ### Used when comparing all possible PROTEIN pairs
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
filename = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+species+'_all-transcript-matches.txt'
else: filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_all-transcript-matches.txt'
else: ### Used after comparing all possible TRANSCRIPT STRUCTURE pairs
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
filename = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/'+species+'_top-transcript-matches.txt'
else: filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_top-transcript-matches.txt'
print 'Imported:',filename
fn=filepath(filename); probeset_transcript_db={}; unique_transcripts={}; unique_ens_transcripts={}; all_transcripts={}
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
try: probeset,match_transcripts,not_match_transcripts = string.split(probeset_data,'\t')
        except ValueError: print probeset_data;kill ### malformed line; deliberate crash idiom used throughout this module
#if probeset == '2991483':
match_transcripts = string.split(match_transcripts,'|')
not_match_transcripts = string.split(not_match_transcripts,'|')
        ### Multiple transcript comparison sets can exist, depending on how many unique exon coordinates the probeset aligns to
probeset_transcript_db[probeset] = match_transcripts,not_match_transcripts
consider_all_ensembl = 'no'
if array_type == 'RNASeq': ### Needed for Ensembl gene IDs that don't have 'ENS' in them.
if 'ENS' not in probeset: consider_all_ensembl = 'yes'
###Store unique transcripts
for transcript in match_transcripts:
if 'ENS' in transcript or consider_all_ensembl == 'yes': unique_ens_transcripts[transcript]=[]
else: unique_transcripts[transcript]=[]
            if consider_all_ensembl == 'yes': unique_transcripts[transcript]=[] ### This is redundant, but it is the most unbiased way to examine all non-Ensembls as well
all_transcripts[transcript]=[]
for transcript in not_match_transcripts:
if 'ENS' in transcript or consider_all_ensembl == 'yes': unique_ens_transcripts[transcript]=[]
else: unique_transcripts[transcript]=[]
            if consider_all_ensembl == 'yes': unique_transcripts[transcript]=[] ### This is redundant, but it is the most unbiased way to examine all non-Ensembls as well
all_transcripts[transcript]=[]
print 'len(unique_ens_transcripts)',len(unique_ens_transcripts)
print 'len(unique_transcripts)',len(unique_transcripts)
return probeset_transcript_db,unique_ens_transcripts,unique_transcripts,all_transcripts
def searchEntrez(accession_list,bio_type):
start_time = time.time()
Entrez.email = "[email protected]" # Always tell NCBI who you are
index=0; gi_list=[]
while index<len(accession_list)+20:
try: new_accession_list = accession_list[index:index+20]
except IndexError: new_accession_list = accession_list[index:]
if len(new_accession_list)<1: break
search_handle = Entrez.esearch(db=bio_type,term=string.join(new_accession_list,','))
search_results = Entrez.read(search_handle)
gi_list += search_results["IdList"]
index+=20
end_time = time.time(); time_diff = int(end_time-start_time)
print "finished in %s seconds" % time_diff
return gi_list
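### Illustrative sketch (not executed) of the 20-accession batching pattern in
### searchEntrez: slicing past the end of a list returns a shorter (or empty)
### batch, so the loop terminates on the empty-batch break without indexing
### errors. Accession IDs below are hypothetical.
"""
accessions = ['AC%d' % i for i in range(45)]
index = 0
while True:
    batch = accessions[index:index + 20]
    if len(batch) < 1: break
    print len(batch) # 20, 20, 5
    index += 20
"""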
def fetchSeq(accession_list,bio_type,version):
output_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-Protein_sequences_'+str(version)+'.txt'
datar = export.ExportFile(output_file)
output_file = 'AltDatabase/'+species+'/SequenceData/output/details/Transcript-Protein_sequences_'+str(version)+'.txt'
datad = export.ExportFile(output_file)
print len(accession_list), "mRNA Accession numbers submitted to eUTILs."
start_time = time.time()
Entrez.email = "[email protected]" # Always tell NCBI who you are
index=0
while index<len(accession_list)+200:
try: new_accession_list = accession_list[index:index+200]
except IndexError: new_accession_list = accession_list[index:]
if len(new_accession_list)<1: break
        ### efetch is used here; epost doesn't concatenate everything into a URL the way esearch does
try:
search_handle = Entrez.efetch(db=bio_type,id=string.join(new_accession_list,','),retmode="xml") #,usehistory="y"
search_results = Entrez.read(search_handle)
except Exception:
try: ### Make sure it's not due to an internet connection issue
search_handle = Entrez.efetch(db=bio_type,id=string.join(new_accession_list,','),retmode="xml") #,usehistory="y"
search_results = Entrez.read(search_handle)
except Exception: index+=200; continue
for a in search_results: ### list of dictionaries
accession = a['GBSeq_primary-accession']
if bio_type == 'nucleotide':
mRNA_seq=''; proceed = 'no'; get_location = 'no'
### Get translated protein sequence if available
try:
for entry in a["GBSeq_feature-table"]:
for key in entry: ### Key's in dictionary
if key == 'GBFeature_quals': ### composed of a list of dictionaries
for i in entry[key]:
if i['GBQualifier_name'] == 'protein_id': protein_id = i['GBQualifier_value']
if i['GBQualifier_name'] == 'translation': protein_sequence = i['GBQualifier_value']; proceed = 'yes'
if key == 'GBFeature_key':
if entry[key] == 'CDS':
get_location = 'yes'
else: get_location = 'no' ### Get CDS location (occurs only following the GBFeature_key CDS, but not after)
if key == 'GBFeature_location' and get_location == 'yes':
cds_location = entry[key]
except KeyError: null = []
alt_seq='no'
if proceed == 'yes':
if protein_sequence[0] != 'M': proceed = 'no'; alt_seq = 'yes'
else:
values = [accession,protein_id,protein_sequence]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(accession+'\t'+string.replace(cds_location,'..','\t')+'\n')
else: ### Translate mRNA seq to protein
mRNA_seq = a['GBSeq_sequence']
mRNA_db={}; mRNA_db[accession] = '',mRNA_seq
translation_db = BuildInSilicoTranslations(mRNA_db)
for mRNA_AC in translation_db: ### Export in silico protein predictions
protein_id, protein_sequence,cds_location = translation_db[mRNA_AC]
values = [mRNA_AC,protein_id,protein_sequence]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(mRNA_AC+'\t'+string.replace(cds_location,'..','\t')+'\n')
if len(translation_db)==0 and alt_seq == 'yes': ### If no protein sequence starting with an "M" found, write the listed seq
values = [accession,protein_id,protein_sequence]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(accession+'\t'+string.replace(cds_location,'..','\t')+'\n')
else: protein_sequence = a['GBSeq_sequence']
index+=200
"""print 'accession',accession
print 'protein_id',protein_id
print 'protein_sequence',protein_sequence
print 'mRNA_seq',mRNA_seq,'\n' """
end_time = time.time(); time_diff = int(end_time-start_time)
print "finished in %s seconds" % time_diff
datar.close()
datad.close()
def importEnsemblTranscriptSequence(missing_protein_ACs):
    import_dir = '/AltDatabase/'+species+'/SequenceData' ### Multi-species file
g = GrabFiles(); g.setdirectory(import_dir)
seq_files = g.searchdirectory('.cdna.all.fa'); seq_files.sort(); filename = seq_files[-1]
#filename = 'AltDatabase/'+species+'/SequenceData/'+species+'_ensembl_cDNA.fasta.txt'
start_time = time.time()
output_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-EnsInSilicoProt_sequences.txt'
datar = export.ExportFile(output_file)
output_file = 'AltDatabase/'+species+'/SequenceData/output/details/Transcript-EnsInSilicoProt_sequences.txt'
datad = export.ExportFile(output_file)
print "Begining generic fasta import of",filename
fn=filepath(filename); translated_mRNAs={}; sequence = ''
for line in open(fn,'r').xreadlines():
try: data, newline= string.split(line,'\n')
except ValueError: continue
try:
if data[0] == '>':
if len(sequence) > 0:
if transid in missing_protein_ACs:
### Perform in silico translation
                        mRNA_db = {}; mRNA_db[transid] = '',sequence ### unlike the UCSC parser, sequence has no leading sentinel character to strip here
translation_db = BuildInSilicoTranslations(mRNA_db)
for mRNA_AC in translation_db: ### Export in silico protein predictions
protein_id, protein_seq, cds_location = translation_db[mRNA_AC]
values = [mRNA_AC,protein_id,protein_seq]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(mRNA_AC+'\t'+string.replace(cds_location,'..','\t')+'\n')
translated_mRNAs[mRNA_AC]=[]
### Parse new line
#>ENST00000400685 cdna:known supercontig::NT_113903:9607:12778:1 gene:ENSG00000215618
t= string.split(data[1:],':'); sequence=''
transid_data = string.split(t[0],' '); transid = transid_data[0]
if '.' in transid:
transid = string.split(transid,'.')[0]
#try: ensembl_id,chr,strand,transid,prot_id = t
#except ValueError: ensembl_id,chr,strand,transid = t
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
#### Add the last entry
if len(sequence) > 0:
if transid in missing_protein_ACs:
### Perform in silico translation
            mRNA_db = {}; mRNA_db[transid] = '',sequence ### no leading sentinel character to strip
translation_db = BuildInSilicoTranslations(mRNA_db)
for mRNA_AC in translation_db: ### Export in silico protein predictions
protein_id, protein_seq, cds_location = translation_db[mRNA_AC]
values = [mRNA_AC,protein_id,protein_seq]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(mRNA_AC+'\t'+string.replace(cds_location,'..','\t')+'\n')
translated_mRNAs[mRNA_AC]=[]
datar.close()
datad.close()
end_time = time.time(); time_diff = int(end_time-start_time)
print "Ensembl transcript sequences analyzed in %d seconds" % time_diff
missing_protein_ACs_inSilico=[]
for mRNA_AC in missing_protein_ACs:
if mRNA_AC not in translated_mRNAs:
missing_protein_ACs_inSilico.append(mRNA_AC)
    print len(missing_protein_ACs_inSilico), 'Ensembl mRNAs lacking cDNA sequence that were NOT in silico translated (e.g., lncRNAs)', missing_protein_ACs_inSilico[:10]
    return missing_protein_ACs_inSilico ### mirror importUCSCSequences, which returns this list
def importEnsemblProteinSeqData(species,unique_ens_transcripts):
from build_scripts import FeatureAlignment
protein_relationship_file,protein_feature_file,protein_seq_fasta,null = FeatureAlignment.getEnsemblRelationshipDirs(species)
ens_transcript_protein_db = FeatureAlignment.importEnsemblRelationships(protein_relationship_file,'transcript')
unique_ens_proteins = {}
for transcript in ens_transcript_protein_db:
if transcript in unique_ens_transcripts:
protein_id = ens_transcript_protein_db[transcript]
if len(protein_id)>1:
unique_ens_proteins[protein_id] = transcript
ensembl_protein_seq_db = importEnsemblProtSeq(protein_seq_fasta,unique_ens_proteins)
transcript_with_prot_seq = {}
for protein_id in ensembl_protein_seq_db:
if protein_id in unique_ens_proteins:
transcript = unique_ens_proteins[protein_id]
transcript_with_prot_seq[transcript]=[]
missing_ens_proteins={}
for transcript in unique_ens_transcripts:
if transcript not in transcript_with_prot_seq: missing_ens_proteins[transcript]=[]
print len(ensembl_protein_seq_db),'Ensembl transcripts linked to protein sequence and',len(missing_ens_proteins), 'transcripts missing protein sequence.'
return ensembl_protein_seq_db, missing_ens_proteins
def importEnsemblProtSeq(filename,unique_ens_proteins):
export_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-EnsProt_sequences.txt'
export_data = export.ExportFile(export_file)
fn=filepath(filename); ensembl_protein_seq_db={}; sequence = ''; y=0
for line in open(fn,'r').xreadlines():
try: data, newline= string.split(line,'\n'); y+=1
except ValueError: continue
try:
if data[0] == '>':
if len(sequence) > 0:
try: ensembl_prot = ensembl_prot
except Exception: print data,y,t;kill
if ensembl_prot in unique_ens_proteins:
mRNA_AC = unique_ens_proteins[ensembl_prot]
values = string.join([mRNA_AC,ensembl_prot,sequence],'\t')+'\n'
export_data.write(values); ensembl_protein_seq_db[ensembl_prot] = []
### Parse new line
t= string.split(data[1:],' '); sequence=''
ensembl_prot = t[0]
if '.' in ensembl_prot:
ensembl_prot = string.split(ensembl_prot,'.')[0] ### Added to Ensembl after version 77
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
if ensembl_prot in unique_ens_proteins:
mRNA_AC = unique_ens_proteins[ensembl_prot]
values = string.join([mRNA_AC,ensembl_prot,sequence],'\t')+'\n'
export_data.write(values); ensembl_protein_seq_db[ensembl_prot] = []
export_data.close()
return ensembl_protein_seq_db
def importUniProtSeqeunces(species,transcripts_with_uniprots,transcripts_to_analyze):
global n_terminal_seq; global c_terminal_seq
n_terminal_seq={}; c_terminal_seq={}
export_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-UniProt_sequences.txt'
export_data = export.ExportFile(export_file)
#filename = 'AltDatabase/'+species+'/SequenceData/'+'uniprot_trembl_sequence.txt'
filename = 'AltDatabase/uniprot/'+species+'/uniprot_sequence.txt'
fn=filepath(filename); transcript_to_uniprot={}
unigene_ensembl_up = {}
for line in open(fn,'r').readlines():
data, newline= string.split(line,'\n')
t = string.split(data,'\t')
id=t[0];ac=t[1];ensembls=t[4];seq=t[2];type=t[6];unigenes=t[7];embls=t[9]
ac=string.split(ac,','); embls=string.split(embls,',') #; ensembls=string.split(ensembls,','); unigenes=string.split(unigenes,',')
        if type != 'swissprot1': ### unclear why this condition excluded swissprot, so '1' was appended to disable the exclusion - version 2.1.1
            ### Note: These associations are based on UCSC, which in some cases don't look correct: see AY429540 and Q75N08 from the KgXref file.
            ### Possibly exclude
ac = ac[0]
if ac in transcripts_with_uniprots:
mRNA_ACs = transcripts_with_uniprots[ac]
for mRNA_AC in mRNA_ACs:
transcript_to_uniprot[mRNA_AC] = []
values = string.join([mRNA_AC,ac,seq],'\t')+'\n'; export_data.write(values)
for embl in embls:
proceed = 'no'
            if (len(embl)>0) and type == 'fragment': ###NOTE: Fragment-annotated entries seem to be the only protein IDs that contain direct references to a specific mRNA rather than promiscuous ones (as opposed to Swissprot and Variant)
if embl in transcripts_to_analyze: proceed = 'yes'
elif embl in transcripts_with_uniprots: proceed = 'yes'
if proceed == 'yes':
if embl not in transcript_to_uniprot:
transcript_to_uniprot[embl] = []
values = string.join([embl,id,seq],'\t')+'\n'; export_data.write(values)
n_terminal_seq[seq[:5]] = []
c_terminal_seq[seq[-5:]] = []
export_data.close()
missing_protein_ACs={}
for mRNA_AC in transcripts_to_analyze:
if mRNA_AC not in transcript_to_uniprot: missing_protein_ACs[mRNA_AC]=[]
for protein_AC in transcripts_with_uniprots:
mRNA_ACs = transcripts_with_uniprots[protein_AC]
for mRNA_AC in mRNA_ACs:
if mRNA_AC not in transcript_to_uniprot: missing_protein_ACs[mRNA_AC]=[]
    if len(transcripts_to_analyze)>0: ### Have to submit ACs to report them
print len(missing_protein_ACs), 'missing protein ACs for associated UniProt mRNAs and', len(transcript_to_uniprot), 'found.'
print len(n_terminal_seq),len(c_terminal_seq),'N and C terminal, respectively...'
return missing_protein_ACs
def BuildInSilicoTranslations(mRNA_db):
"""
BI517798
Seq('ATGTGGCCAGGAGACGCCACTGGAGAACATGCTGTTCGCCTCCTTCTACCTTCTGGATTT ...', IUPACUnambiguousDNA())
213 MWPGDATGEHAVRLLLPSGFYPGFSWQYPGSVAFHPRPQVRDPGQRVPDASGRGRLVVRAGPAHPPGLPLLWEPLAIWGNRMPSHRLPLLPQHVRQHLLPHLHQRRPFPGHCAPGQVPQAPQAPLRTPGLCLPVGGGGCGHGPAAGEPTDRADKHTVGLPAAVPGEGSNMPGVPWQWPSLPVHHQVTCTVIIRSCGRPRVEKALRTRQGHESP
211 MNGLEVAPPGLITNFSLATAEQCGQETPLENMLFASFYLLDFILALVGNTLALWLFIRDHKSGTPANVFLMHLAVADLSCVLVLPTRLVYHFSGNHWPFGEIACRLTGFLFYLNMYASIYFLTCISADRFLAIVHPVKSLKLRRPLYAHLACAFLWVVVAVAMAPLLVSPQTVQTNTRWVCLQLYREKAPTCLVSLGSGLHFPFITRSRVL
"""
translation_db={}
from Bio.Seq import Seq
### New Biopython methods - http://biopython.org/wiki/Seq
from Bio.Alphabet import generic_dna
### Deprecated
#from Bio.Alphabet import IUPAC
#from Bio import Translate ### deprecated
#print 'Begining in silco translation for',len(mRNA_db),'sequences.'
def cleanSeq(input_seq):
"""Wrapper for Biopython translate function. Bio.Seq.translate will complain if input sequence is
not a mulitple of 3. This wrapper function passes an acceptable input to Bio.Seq.translate in order to
avoid this warning."""
#https://github.com/broadinstitute/oncotator/pull/265/commits/94b20aabff48741a92b3f9e608e159957af6af30
trailing_bases = len(input_seq) % 3
if trailing_bases:
input_seq = ''.join([input_seq, 'NN']) if trailing_bases == 1 else ''.join([input_seq, 'N'])
return input_seq
first_time = 1
for mRNA_AC in mRNA_db:
if mRNA_AC == 'AK025306': print '@@@@@@@@@@^^^^AK025306...attempting in silico translation'
temp_protein_list=[]; y=0
protein_id,sequence = mRNA_db[mRNA_AC]
if protein_id == '': protein_id = mRNA_AC+'-PEP'
original_seqeunce = sequence
sequence = string.upper(sequence)
loop=0
while (string.find(sequence,'ATG')) != -1: #while there is still a methionine in the DNA sequence, reload this DNA sequence for translation: find the longest ORF
x = string.find(sequence,'ATG') #before doing this, need to find the start codon ourselves
y += x #maintain a running count of the sequence position
if loop!=0: y+=3 ### This accounts for the loss in sequence_met
#if y<300: print original_seqeunce[:y+2], x
sequence_met = sequence[x:] #x gives us the position where the first Met* is.
### New Biopython methods - http://biopython.org/wiki/Seq
dna_clean = cleanSeq(sequence_met)
dna_seq = Seq(dna_clean, generic_dna)
prot_seq = dna_seq.translate(to_stop=True)
### Deprecated code
#seq_type = IUPAC.unambiguous_dna
#dna_seq = Seq(sequence_met,seq_type)
#standard_translator = Translate.unambiguous_dna_by_id[1]
#prot_seq = standard_translator.translate_to_stop(dna_seq) #convert the dna to protein sequence
#prot_seq_string = prot_seq.tostring()
prot_seq_string = str(prot_seq)
prot_seq_tuple = len(prot_seq_string),y,prot_seq_string,dna_seq #added DNA sequence to determine which exon we're in later
temp_protein_list.append(prot_seq_tuple) #create a list of protein sequences to select the longest one
            sequence = sequence_met[3:] # sequence_met is the sequence from the current methionine onward; reset the sequence for the next loop
loop+=1
if len(temp_protein_list) == 0:
continue
else:
#temp_protein_list = pick_optimal_peptide_seq(temp_protein_list) ###Used a more complex method in the original code to determine the best selection
temp_protein_list.sort(); temp_protein_list.reverse()
peptide_len1 = temp_protein_list[0][0]
prot_seq_string = temp_protein_list[0][2] #extract out the protein sequence string portion of the tuple
coding_dna_seq_string = temp_protein_list[0][3]
pos1 = temp_protein_list[0][1] ###position in DNA sequence where the translation starts
n_term1 = prot_seq_string[:5]; c_term1 = prot_seq_string[-5:]
###Check the top protein sequences and see if there are frame differences
choose = 0
            for protein_data in temp_protein_list[1:]: ###exclude the first entry
peptide_len2 = protein_data[0]; pos2= protein_data[1]
percent_of_top = (float(peptide_len1)/peptide_len2)*100
if (percent_of_top>70) and (peptide_len2>20):
prot_seq_string2 = protein_data[2]
n_term2 = prot_seq_string2[:5]; c_term2 = prot_seq_string2[-5:]
frame_shift = check4FrameShifts(pos1,pos2)
if frame_shift == 'yes':
###determine which prediction is better to use
if n_term1 in n_terminal_seq: choose = 1
elif n_term2 in n_terminal_seq: choose = 2
elif c_term1 in c_terminal_seq: choose = 1
elif c_term2 in c_terminal_seq: choose = 2
if choose == 2:
prot_seq_string = protein_data[2]
coding_dna_seq_string = protein_data[3]
alt_prot_seq_string = temp_protein_list[0][2]
alt_coding_dna_seq_string = temp_protein_list[0][3]
pos1 = protein_data[1]
if first_time == 0:
print mRNA_AC
print coding_dna_seq_string
print len(prot_seq_string),prot_seq_string
print alt_coding_dna_seq_string
print len(alt_prot_seq_string),alt_prot_seq_string
first_time = 1
###write this data out in the future
else: break
else: break ###do not need to keep looking
dl = (len(prot_seq_string))*3 #figure out what the DNA coding sequence length is
#dna_seq_string_coding_to_end = coding_dna_seq_string.tostring()
dna_seq_string_coding_to_end = str(coding_dna_seq_string)
coding_dna_seq_string = dna_seq_string_coding_to_end[0:dl]
cds_location = str(pos1+1)+'..'+str(pos1+len(prot_seq_string)*3+3)
### Determine if a stop codon is in the sequence or there's a premature end
coding_diff = len(dna_seq_string_coding_to_end) - len(coding_dna_seq_string)
if coding_diff > 4: stop_found = 'stop-codon-present'
else: stop_found = 'stop-codon-absent'
#print [mRNA_AC],[protein_id],prot_seq_string[0:10]
if mRNA_AC == 'AK025306': print '*********AK025306',[protein_id],prot_seq_string[0:10]
translation_db[mRNA_AC] = protein_id,prot_seq_string,cds_location
return translation_db
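### Illustrative sketch (not executed) of the core in silico translation step in
### BuildInSilicoTranslations: locate an ATG, pad the tail to a multiple of 3 as
### cleanSeq does, and translate to the first stop codon. Note this assumes a
### recent Biopython where Seq.translate needs no alphabet (Bio.Alphabet was
### removed in Biopython 1.78); older versions pass generic_dna as above.
"""
from Bio.Seq import Seq
dna = 'CCATGGCTTGGTAGCC' # hypothetical mRNA fragment
orf = dna[dna.find('ATG'):] # 'ATGGCTTGGTAGCC'
orf += 'N' * ((3 - len(orf) % 3) % 3) # pad to a multiple of 3
print str(Seq(orf).translate(to_stop=True)) # 'MAW'
"""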
def check4FrameShifts(pos1,pos2):
pos_diff = abs(pos2 - pos1)
str_codon_diff = str(float(pos_diff)/3)
value1,value2 = string.split(str_codon_diff,'.')
if value2 == '0': frame_shift = 'no'
else: frame_shift = 'yes'
return frame_shift
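### Illustrative sketch (not executed): check4FrameShifts asks whether two
### candidate start positions differ by a whole number of codons. The string
### arithmetic above is equivalent to this simple modulo-3 test.
"""
for pos1, pos2 in [(12, 21), (12, 22)]:
    frame_shift = 'no' if abs(pos2 - pos1) % 3 == 0 else 'yes'
    print pos1, pos2, frame_shift # 12 21 no / 12 22 yes
"""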
def convertListsToTuple(list_of_lists):
list_of_tuples=[]
for ls in list_of_lists:
list_of_tuples.append(tuple(ls))
return list_of_tuples
def compareProteinFeaturesForPairwiseComps(probeset_protein_db,protein_seq_db,probeset_gene_db,species,array_type):
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/probeset-protein-dbase_seqcomp.txt'
else: export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-dbase_seqcomp.txt'
fn=filepath(export_file); export_data1 = open(fn,'w')
title_row = 'Probeset\tAligned protein_id\tNon-aligned protein_id\n'; export_data1.write(title_row)
minimal_effect_db={}; accession_seq_db={};start_time = time.time()
print "Comparing protein features for all pair-wise comparisons"
for probeset in probeset_protein_db:
geneid = probeset_gene_db[probeset][0] ###If one probeset
match_list,null_list = probeset_protein_db[probeset]
prot_match_list=[]; prot_null_list=[]
for protein_ac in match_list:
protein_seq = protein_seq_db[protein_ac][0]
prot_match_list.append([protein_ac,protein_seq])
for protein_ac in null_list:
protein_seq = protein_seq_db[protein_ac][0]
prot_null_list.append([protein_ac,protein_seq])
### Compare all possible protein combinations to find those with the minimal change in (1) sequence and (2) domain compositions
if len(prot_match_list)>0 and len(prot_null_list)>0:
results_list=[]
for mi in prot_match_list:
for ni in prot_null_list:
results = characterizeProteinLevelExonChanges(probeset,geneid,mi,ni,array_type)
results_list.append(results)
results_list.sort()
hit_ac = results_list[0][-2]; null_ac = results_list[0][-1]
values = string.join([probeset,hit_ac,null_ac],'\t')+'\n'; export_data1.write(values)
            minimal_effect_db[probeset] = [hit_ac,null_ac] ### populated so the summary count printed below is accurate
accession_seq_db[hit_ac] = [protein_seq_db[hit_ac][0]]
accession_seq_db[null_ac] = [protein_seq_db[null_ac][0]]
#print results_list[0][-2],results_list[0][-1]
#print probeset,len(prot_match_list),len(prot_null_list),results_list;kill
print len(minimal_effect_db),"optimal pairs of probesets linked to Ensembl found."
end_time = time.time(); time_diff = int(end_time-start_time)
print "databases built in %d seconds" % time_diff
export_data1.close()
"""
title_row = 'Probeset\tAligned protein_id\tNon-aligned protein_id'
export_file = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-dbase_seqcomp.txt'
exportSimple(minimal_effect_db,export_file,title_row)"""
if (array_type == 'junction' or array_type == 'RNASeq') and data_type != 'null':
export_file = 'AltDatabase/'+species+'/'+array_type+'/'+data_type+'/SEQUENCE-protein-dbase_seqcomp.txt'
else: export_file = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_seqcomp.txt'
exportSimple(accession_seq_db,export_file,'')
def exportSimple(db,output_file,title_row):
data = export.ExportFile(output_file)
if len(title_row)>0: data.write(title_row+'\n')
for key in db:
try: values = string.join([key] + db[key],'\t')+'\n'; data.write(values)
except TypeError:
list2=[]
for list in db[key]: list2+=list
values = string.join([key] + list2,'\t')+'\n'; data.write(values)
data.close()
print output_file, 'exported...'
def characterizeProteinLevelExonChanges(uid,array_geneid,hit_data,null_data,array_type):
domains=[]; functional_attribute=[]; probeset_num = 1
if '|' in uid:
probeset,probeset2 = string.split(uid,'|')
probeset_num = 2
else: probeset = uid
hv=1; pos_ref_AC = hit_data[0]; neg_ref_AC = null_data[0]
if hv!=0:
neg_coding_seq = null_data[1]; pos_coding_seq = hit_data[1]
neg_length = len(neg_coding_seq); pos_length = len(pos_coding_seq)
pos_length = float(pos_length); neg_length = float(neg_length)
if array_geneid in protein_ft_db:
protein_ft = protein_ft_db[array_geneid]
neg_ft_missing,pos_ft_missing = ExonAnalyze_module.compareProteinFeatures(protein_ft,neg_coding_seq,pos_coding_seq)
for (pos,blank,ft_name,annotation) in pos_ft_missing: domains.append(ft_name)
for (pos,blank,ft_name,annotation) in neg_ft_missing:
###If missing from the negative list, it is present in the positive state
domains.append(ft_name)
if pos_coding_seq[:5] != neg_coding_seq[:5]: function_var = 'alt-N-terminus';functional_attribute.append(function_var)
if pos_coding_seq[-5:] != neg_coding_seq[-5:]: function_var = 'alt-C-terminus';functional_attribute.append(function_var)
### Record change in peptide size
protein_length_diff = abs(pos_length-neg_length)
        results = [len(functional_attribute),len(domains),protein_length_diff,pos_ref_AC,neg_ref_AC] ###number of N- or C-terminal differences, number of domains, protein length difference
return results
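### Illustrative sketch (not executed) of the terminal-sequence comparison in
### characterizeProteinLevelExonChanges: the first and last five residues of the
### two isoforms are compared directly to flag alternative N- or C-termini.
### The peptide sequences below are hypothetical.
"""
pos_seq = 'MAAAKLLPQRSTV'
neg_seq = 'MAAAKLLPQWXYZ'
if pos_seq[:5] != neg_seq[:5]: print 'alt-N-terminus' # identical here
if pos_seq[-5:] != neg_seq[-5:]: print 'alt-C-terminus' # differs -> printed
"""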
############# Code currently not used (LOCAL PROTEIN SEQUENCE ANALYSIS) ##############
def importUCSCSequenceAssociations(species,transcripts_to_analyze):
""" NOTE: This method is currently not used, by the fact that the kgXref is not downloaded from the
UCSC ftp database. This file relates mRNAs primarily to UniProt rather than GenBank and thus is less
ideal than the direct NCBI API based method used downstream """
filename = 'AltDatabase/'+species+'/SequenceData/kgXref.txt'
fn=filepath(filename); mRNA_protein_db={}
for line in open(fn,'r').readlines():
data, newline= string.split(line,'\n')
t = string.split(data,'\t')
        try: mRNA_AC=t[1];prot_AC=t[2] ###Information actually seems largely redundant with what we get from UniProt directly
except IndexError: mRNA_AC='';prot_AC=''
#if mRNA_AC == 'BC152405': print [prot_AC];kill
if len(mRNA_AC)>2 and len(prot_AC)>2:
if mRNA_AC in transcripts_to_analyze:
try: mRNA_protein_db[prot_AC].append(mRNA_AC)
except KeyError: mRNA_protein_db[prot_AC] = [mRNA_AC]
print len(mRNA_protein_db), "mRNA to protein ACs parsed from UCSC data"
missing_protein_ACs={} ### These will require in Silico translation
for mRNA_AC in transcripts_to_analyze:
if mRNA_AC not in mRNA_protein_db: missing_protein_ACs[mRNA_AC]=[]
print len(missing_protein_ACs), 'missing protein ACs for associated UCSC mRNAs and', len(mRNA_protein_db), 'found.'
return mRNA_protein_db,missing_protein_ACs
def importNCBIProteinSeq(mRNA_protein_db):
export_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-NCBIProt_sequences.txt'
export_data = export.ExportFile(export_file)
### Reverse the values and key of the dictionary
protein_linked_ACs = {}
for mRNA_AC in mRNA_protein_db:
protein_AC = mRNA_protein_db[mRNA_AC]
try: protein_linked_ACs[protein_AC].append(mRNA_AC)
except KeyError: protein_linked_ACs[protein_AC] = [mRNA_AC]
    import_dir = '/AltDatabase/SequenceData' ### Multi-species file
g = GrabFiles(); g.setdirectory(import_dir)
seq_files = g.searchdirectory('fsa_aa')
for filename in seq_files:
fn=filepath(filename); ncbi_prot_seq_db = {}
sequence = ''
for line in open(fn,'rU').xreadlines():
try: data, newline= string.split(line,'\n')
except ValueError: continue
try:
if data[0] == '>':
if len(sequence) > 0:
if len(prot_id)>0 and prot_id in protein_linked_ACs:
                            mRNA_ACs = protein_linked_ACs[prot_id] ### look up by prot_id (protein_AC was a leftover variable from the earlier loop)
for mRNA_AC in mRNA_ACs:
values = string.join([mRNA_AC,prot_id,sequence],'\t')+'\n'
export_data.write(values)
ncbi_prot_seq_db[mRNA_AC] = [] ###occurs once in the file
t= string.split(data,'|'); prot_id = t[3][:-2]; sequence = ''
else: sequence = ''; t= string.split(data,'|'); prot_id = t[3][:-2] ###Occurs for the first entry
except IndexError: continue
try:
if data[0] != '>': sequence = sequence + data
except IndexError: continue
    ### Handle the final entry in the file
    if len(prot_id)>0 and prot_id in protein_linked_ACs:
        mRNA_ACs = protein_linked_ACs[prot_id]
        for mRNA_AC in mRNA_ACs:
            values = string.join([mRNA_AC,prot_id,sequence],'\t')+'\n'
            export_data.write(values)
            ncbi_prot_seq_db[mRNA_AC] = [] ###Need this for the last entry
    export_data.close() ### close only after the final entry has been written
missing_protein_ACs={}
for mRNA_AC in mRNA_protein_db:
if mRNA_AC not in ncbi_prot_seq_db: missing_protein_ACs[mRNA_AC]=[]
print len(ncbi_prot_seq_db), "genbank protein ACs associated with sequence, out of", len(mRNA_protein_db)
print len(missing_protein_ACs), 'missing protein ACs for associated NCBI mRNAs and', len(ncbi_prot_seq_db), 'found.'
return missing_protein_ACs
def importUCSCSequences(missing_protein_ACs):
start_time = time.time()
filename = 'AltDatabase/'+species+'/SequenceData/mrna.fa'
output_file = 'AltDatabase/'+species+'/SequenceData/output/sequences/Transcript-InSilicoProt_sequences.txt'
datar = export.ExportFile(output_file)
output_file = 'AltDatabase/'+species+'/SequenceData/output/details/Transcript-InSilicoProt_sequences.txt'
datad = export.ExportFile(output_file)
print "Begining generic fasta import of",filename
#'>gnl|ENS|Mm#S10859962 Mus musculus 12 days embryo spinal ganglion cDNA, RIKEN full-length enriched library, clone:D130006G06 product:unclassifiable, full insert sequence /gb=AK051143 /gi=26094349 /ens=Mm.1 /len=2289']
#'ATCGTGGTGTGCCCAGCTCTTCCAAGGACTGCTGCGCTTCGGGGCCCAGGTGAGTCCCGC'
fn=filepath(filename); sequence = '|'; translated_mRNAs={}
for line in open(fn,'r').xreadlines():
try: data, newline= string.split(line,'\n')
except ValueError: continue
if len(data)>0:
if data[0] != '#':
try:
if data[0] == '>':
if len(sequence) > 1:
if accession in missing_protein_ACs:
### Perform in silico translation
mRNA_db = {}; mRNA_db[accession] = '',sequence[1:]
translation_db = BuildInSilicoTranslations(mRNA_db)
for mRNA_AC in translation_db: ### Export in silico protein predictions
protein_id, protein_seq, cds_location = translation_db[mRNA_AC]
values = [mRNA_AC,protein_id,protein_seq]; values = string.join(values,'\t')+'\n'; datar.write(values)
datad.write(mRNA_AC+'\t'+string.replace(cds_location,'..','\t')+'\n')
translated_mRNAs[mRNA_AC]=[]
### Parse new line
values = string.split(data,' '); accession = values[0][1:]; sequence = '|'; continue
except IndexError: null = []
try:
if data[0] != '>': sequence = sequence + data
                except IndexError: continue
datar.close()
datad.close()
end_time = time.time(); time_diff = int(end_time-start_time)
print "UCSC mRNA sequences analyzed in %d seconds" % time_diff
missing_protein_ACs_inSilico=[]
for mRNA_AC in missing_protein_ACs:
if mRNA_AC not in translated_mRNAs:
missing_protein_ACs_inSilico.append(mRNA_AC)
    print len(missing_protein_ACs_inSilico), 'mRNAs lacking sequence that were NOT in silico translated (e.g., lncRNAs)',missing_protein_ACs_inSilico[:10]
return missing_protein_ACs_inSilico
############# END Code currently not used (LOCAL PROTEIN SEQUENCE ANALYSIS) ##############
def runProgram(Species,Array_type,Data_type,translate_seq,run_seqcomp):
global species; global array_type; global translate; global data_type; global test; global test_genes
species = Species; array_type = Array_type; translate = translate_seq; data_type = Data_type
test = 'no'; test_genes = ['ENSMUSG00000029467']
if array_type == 'gene' or array_type == 'exon' or data_type == 'exon':
compare_all_features = 'no'
print 'Begin Exon-based compareProteinComposition'
try: compareProteinComposition(species,array_type,translate,compare_all_features)
except Exception:
compareExonComposition(species,array_type)
compareProteinComposition(species,array_type,translate,compare_all_features)
if run_seqcomp == 'yes':
compare_all_features = 'yes'; translate = 'no'
compareProteinComposition(species,array_type,translate,compare_all_features)
elif array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq':
""" Modified on 11/26/2017(2.1.1) - integrated compareExonComposition and compareProteinComposition
with the goal of getting protein sequences for ALL UCSC and Ensembl transcripts (known + in silico)
to look at protein length differences before picking the top two compared isoforms. """
compare_all_features = 'no'
print 'Begin Junction-based compareProteinComposition'
compareProteinComposition(species,array_type,translate,compare_all_features)
if run_seqcomp == 'yes':
compare_all_features = 'yes'; translate = 'no'
compareProteinComposition(species,array_type,translate,compare_all_features)
def runProgramTest(Species,Array_type,Data_type,translate_seq,run_seqcomp):
global species; global array_type; global translate; global data_type
species = Species; array_type = Array_type; translate = translate_seq; data_type = Data_type
if array_type == 'gene' or array_type == 'exon' or data_type == 'exon':
compare_all_features = 'no'
compareProteinComposition(species,array_type,translate,compare_all_features)
if run_seqcomp == 'yes':
compare_all_features = 'yes'; translate = 'no'
compareProteinComposition(species,array_type,translate,compare_all_features)
elif array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq':
compare_all_features = 'no'
compareProteinComposition(species,array_type,translate,compare_all_features)
if run_seqcomp == 'yes':
compare_all_features = 'yes'; translate = 'no'
compareProteinComposition(species,array_type,translate,compare_all_features)
if __name__ == '__main__':
species = 'Mm'; array_type = 'AltMouse'; translate='no'; run_seqcomp = 'no'; data_type = 'exon'
species = 'Mm'; array_type = 'RNASeq'; translate='yes'
#species = 'Dr'; array_type = 'RNASeq'; translate='yes'; data_type = 'junction'
test='no'
a = ['ENSMUST00000138102', 'ENSMUST00000193415', 'ENSMUST00000124412', 'ENSMUST00000200569']
importEnsemblTranscriptSequence(a);sys.exit()
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt'
ens_transcript_exon_db,ens_gene_transcript_db,ens_gene_exon_db = importEnsExonStructureDataSimple(filename,species,{},{},{},{})
#print len(ens_transcript_exon_db), len(ens_gene_transcript_db), len(ens_gene_exon_db);sys.exit()
#runProgramTest(species,array_type,data_type,translate,run_seqcomp)
runProgram(species,array_type,data_type,translate,run_seqcomp)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/IdentifyAltIsoforms.py
|
IdentifyAltIsoforms.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
sys.setrecursionlimit(5000)
import export
_script = 'AltAnalyze.py'
_appName = "AltAnalyze"
_appVersion = '2.1.3'
_appDescription = "AltAnalyze is a freely available, open-source and cross-platform program that allows you to processes raw bulk or single-cell RNASeq and "
_appDescription +="microarray data, identify predicted alternative splicing or alternative promoter changes and "
_appDescription +="view how these changes may affect protein sequence, domain composition, and microRNA targeting."
_authorName = 'Nathan Salomonis'
_authorEmail = '[email protected]'
_authorURL = 'http://www.altanalyze.org'
_appIcon = "build_scripts/AltAnalyze_W7.ico"
excludes = ['wx','tests','iPython'] #["wxPython"] #"numpy","scipy","matplotlib"
includes = ["mpmath", "numpy","sklearn.neighbors.typedefs",'sklearn.utils.lgamma','sklearn.manifold',
'sklearn.utils.sparsetools._graph_validation','sklearn.utils.weight_vector',
'pysam.TabProxies','pysam.ctabixproxies','patsy.builtins','dbhash','anydbm']
""" By default, suds will be installed in site-packages as a .egg file (zip compressed). Make a duplicate, change to .zip and extract
here to allow it to be recognized by py2exe (must be a directory) """
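# The note above describes manually converting a zip-compressed suds .egg into a
# plain directory so py2exe can bundle it. A minimal sketch of that step
# (hypothetical helper; the egg filename below is illustrative and not part of
# the original build script):
def extractEggToDirectory(egg_path):
    ### Unpack an .egg (a zip archive) into a sibling directory of the same name
    import zipfile
    target = egg_path[:-4] ### strip the '.egg' extension
    archive = zipfile.ZipFile(egg_path)
    archive.extractall(target)
    archive.close()
    return target
# e.g. extractEggToDirectory('site-packages/suds-0.4-py2.7.egg') leaves a
# 'site-packages/suds-0.4-py2.7' directory that py2exe can traverse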
matplot_exclude = [] #['MSVCP90.dll']
scipy_exclude = [] #['libiomp5md.dll','libifcoremd.dll','libmmd.dll']
""" xml.sax.drivers2.drv_pyexpat is an XML parser needed by suds that py2app fails to include. Identified by looking at the line: parser_list+self.parsers in
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/PyXML-0.8.4-py2.7-macosx-10.6-intel.egg/_xmlplus/sax/saxexts.py
check the py2app print out to see where this file is in the future
(reported issue - may or may not apply) For mac and igraph, core.so must be copied to a new location for py2app:
sudo mkdir /System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/lib-dynload/igraph/
cp /Library/Python/2.6/site-packages/igraph/core.so /System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/lib-dynload/igraph/
May require:
python build_scripts/setup_binary.py py2app --frameworks /Library/Python/2.7/site-packages/llvmlite/binding/libllvmlite.dylib --packages llvmlite,numba
"""
if sys.platform.startswith("darwin"):
### Local version: /usr/local/bin/python2.6
### example command: python setup.py py2app
from distutils.core import setup
import py2app
import lxml
import sklearn
import PIL._imaging
import PIL._imagingft
#import macholib_patch
includes+= ["pkg_resources","distutils","lxml.etree","lxml._elementpath"] #"xml.sax.drivers2.drv_pyexpat"
frameworks = ['/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/PIL']
frameworks += ['/Library/Python/2.7/site-packages/llvmlite/binding/libllvmlite.dylib',
'/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/PIL/.dylibs/libopenjp2.2.1.0.dylib']
"""
resources = ['/System/Library/Frameworks/Python.framework/Versions/2.7']
frameworks = ['/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7/pyconfig.h']
frameworks += ['/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/pkg_resources/__init__.py']
frameworks += ['/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/distutils/util.py']
frameworks += ['/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/distutils/sysconfig.py']
import pkg_resources
import distutils
import distutils.sysconfig
import distutils.util
"""
options = {"py2app":
{"excludes": excludes,
"includes": includes,
#"frameworks": frameworks,
#"resources": resources,
#"argv_emulation": True,
"iconfile": "build_scripts/altanalyze.icns"}
}
setup(name=_appName,
app=[_script],
version=_appVersion,
description=_appDescription,
author=_authorName,
author_email=_authorEmail,
url=_authorURL,
options=options,
#data_files=data_files,
setup_requires=["py2app"]
)
import UI
import shutil
    scr_root = '/Users/saljh8/Documents/GitHub/accessory/.dylibs/' ### trailing slash required: filenames are appended directly below
    des_root = '/Users/saljh8/Documents/GitHub/altanalyze/dist/AltAnalyze.app/Contents/Resources/lib/python2.7/lib-dynload/PIL/.dylibs/'
os.mkdir(des_root)
files = UI.read_directory(scr_root[:-1])
for file in files:
shutil.copy(scr_root+file,des_root+file)
if sys.platform.startswith("win"):
### example command: python setup.py py2exe
from distutils.core import setup
import py2exe
import numpy
import matplotlib
import unique
import lxml
import sys
import sklearn
import pysam
import TabProxies
import ctabix
import csamtools
import cvcf
from mpl_toolkits import mplot3d
import dbhash
import matplotlib.backends.backend_tkagg
import anydbm
import six ### relates to a date-time dependency in matplotlib
#sys.path.append(unique.filepath("Config\DLLs")) ### This is added, but DLLs still require addition to DLL python dir
from distutils.filelist import findall
import os
import mpl_toolkits
excludes = []
data_files=matplotlib.get_py2exe_datafiles()
matplotlibdatadir = matplotlib.get_data_path()
matplotlibdata = findall(matplotlibdatadir)
matplotlibdata_files = []
    data_files += [r'C:\Python27\Lib\site-packages\scipy\extra-dll\msvcr90.dll',
                   r'C:\Python27\Lib\site-packages\scipy\extra-dll\msvcp90.dll',
                   r'C:\Python27\Lib\site-packages\scipy\extra-dll\msvcm90.dll'] ### raw strings guard the backslashes
for f in matplotlibdata:
dirname = os.path.join('matplotlibdata', f[len(matplotlibdatadir)+1:])
matplotlibdata_files.append((os.path.split(dirname)[0], [f]))
windows=[{"script":_script,"icon_resources":[(1,_appIcon)]}]
    options={'py2exe':
             {
             ### The original dict literal repeated the 'includes' key many times;
             ### duplicate keys in a dict silently overwrite one another, so only the
             ### last entry took effect. All modules are now listed once in one list.
             "includes": ['lxml','pysam','TabProxies','csamtools','ctabix',
                          'lxml.etree','lxml._elementpath',
                          'matplotlib','mpl_toolkits','matplotlib.backends.backend_tkagg',
                          'mpl_toolkits.mplot3d',
                          'scipy._lib.messagestream','scipy.special._ufuncs_cxx',
                          'scipy.special.cython_special',
                          'sklearn.neighbors.typedefs','sklearn.neighbors.ball_tree',
                          'sklearn.utils.lgamma','sklearn.linear_model.sgd_fast',
                          'llvmlite.binding.ffi','numba.config'],
             #"includes": 'sklearn.utils.sparsetools._graph_validation',
             #"includes": 'sklearn.utils.weight_vector',
             #"includes": 'sklearn.manifold',
             "dll_excludes": matplot_exclude+scipy_exclude,
             }}
setup(
#console = windows,
windows = windows,
options = options,
version=_appVersion,
description=_appDescription,
author=_authorName,
author_email=_authorEmail,
url=_authorURL,
data_files=matplotlibdata_files+data_files,
)
if sys.platform.startswith("2linux"):
# bb_setup.py
from bbfreeze import Freezer
f = Freezer(distdir="bb-binary")
f.addScript("AltAnalyze.py")
f()
if sys.platform.startswith("2linux"):
# bb_setup.py
from bbfreeze import Freezer
f = Freezer(distdir="bb-binary")
f.addScript("AltAnalyze.py")
f()
if sys.platform.startswith("linux"):
### example command: python setup.py build
includes = ['matplotlib','mpl_toolkits','matplotlib.backends.backend_tkagg']
includefiles = []
from cx_Freeze import setup, Executable
### use to get rid of library.zip and move into the executable, along with appendScriptToLibrary and appendScriptToExe
#buildOptions = dict(create_shared_zip = False)
setup(
name = _appName,
version=_appVersion,
description=_appDescription,
author=_authorName,
author_email=_authorEmail,
url=_authorURL,
#options = dict(build_exe = buildOptions),
options = {"build_exe": {"includes":includes, "include_files": includefiles}},
executables = [Executable(_script,
#appendScriptToExe=True,
#appendScriptToLibrary=False,
#icon='goelite.ico',
compress=True)],
)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/build_scripts/setup_binary.py
|
setup_binary.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
def importFPKMFile(input_file):
added_key={}
firstLine = True
for line in open(input_file,'rU').xreadlines():
data = line.rstrip('\n')
t = string.split(data,'\t')
if firstLine:
firstLine = False
else:
try: geneID= t[0]; symbol=t[4]; position=t[6]; fpkm = t[9]
except Exception: geneID= t[0]; symbol=t[1]; position=t[2]; fpkm = t[4]
if 'chr' not in position: position = 'chr'+position
try:
null=position_symbol_db[position]
if len(symbol)>1: position_symbol_db[position].append(symbol)
except Exception: position_symbol_db[position] = [symbol]
try:
null=position_gene_db[position]
if '.' not in geneID: position_gene_db[position].append(geneID)
except Exception: position_gene_db[position] = [geneID]
if IDType == 'symbol':
if symbol!= '-': geneID = symbol
if IDType == 'position':
geneID = position
if getData:
try: fpkm_db[geneID].append(fpkm)
except Exception: fpkm_db[geneID] = [fpkm]
added_key[geneID]=[]
else:
fpkm_db[geneID]=[]
added_key[geneID]=[]
for i in fpkm_db:
if i not in added_key:
fpkm_db[i].append('0.00')
def getFiles(sub_dir,directories=True):
dir_list = os.listdir(sub_dir); dir_list2 = []
for entry in dir_list:
if directories:
if '.' not in entry: dir_list2.append(entry)
else:
if '.' in entry: dir_list2.append(entry)
return dir_list2
def combineCufflinks(root_dir,gene_type,id_type):
global fpkm_db
global headers
global IDType; IDType = id_type
global position_symbol_db; position_symbol_db={}
global position_gene_db; position_gene_db={}
global getData; getData=False
fpkm_db={}; headers=['GeneID']
folders = getFiles(root_dir,True)
for folder in folders:
l1 = root_dir+'/'+folder
"""
files = getFiles(l1,False)
for file in files:
filename = l1+'/'+file
if gene_type in file and '.gz' not in file:
print filename
importFPKMFile(filename)
headers.append(folder)
break
break
"""
folders2 = getFiles(l1,True)
for folder in folders2:
l2 = l1+'/'+folder
files = getFiles(l2,False)
for file in files:
filename = l2+'/'+file
if gene_type in file and '.gz' not in file:
print filename
importFPKMFile(filename)
headers.append(folder)
for i in fpkm_db:
fpkm_db[i]=[]
getData=True
headers=['UID']
if IDType == 'position':
headers=['Position','GeneID','Symbol']
for folder in folders:
l1 = root_dir+'/'+folder
"""
files = getFiles(l1,False)
for file in files:
filename = l1+'/'+file
if gene_type in file and '.gz' not in file:
print filename
importFPKMFile(filename)
headers.append(folder)
break
break
"""
folders2 = getFiles(l1,True)
for folder in folders2:
l2 = l1+'/'+folder
files = getFiles(l2,False)
for file in files:
filename = l2+'/'+file
if gene_type in file and '.gz' not in file:
print filename
importFPKMFile(filename)
headers.append(folder)
if IDType == 'position':
for position in position_gene_db:
gene = '-'
#print position,position_gene_db[position]
for i in position_gene_db[position]:
if '.' not in i: gene = i
position_gene_db[position] = gene
#print position,position_gene_db[position],'\n'
#print position,position_symbol_db[position]
symbol = '-'
for i in position_symbol_db[position]:
if i != '-': symbol = i
position_symbol_db[position] = symbol
#print position,position_symbol_db[position]
#print position_symbol_db[position]
export_object = open(root_dir+'/'+gene_type+'-Cufflinks.txt','w')
headers = string.join(headers,'\t')+'\n'
export_object.write(headers)
for geneID in fpkm_db:
values = map(str,fpkm_db[geneID])
if IDType != 'position':
values = string.join([geneID]+values,'\t')+'\n'
else:
values = string.join([geneID,position_gene_db[geneID],position_symbol_db[geneID]]+values,'\t')+'\n'
export_object.write(values)
export_object.close()
def gunzipfiles(root_dir):
import gzip
import shutil;
folders = getFiles(root_dir,True)
for folder in folders:
l1 = root_dir+'/'+folder
files = getFiles(l1,False)
for file in files:
filename = l1+'/'+file
if 'genes.fpkm_tracking' in filename:
                content = gzip.GzipFile(filename, 'rb')
                decompressed_filepath = string.replace(filename,'.gz','')
                data = open(decompressed_filepath,'wb')
                shutil.copyfileobj(content,data)
                data.close(); content.close() ### close both handles so the decompressed file is flushed
if __name__ == '__main__':
#gunzipfiles('/Users/saljh8/Downloads/6b_CUFFLINKS_output/');sys.exit()
#combineCufflinks('/Users/saljh8/Downloads/6b_CUFFLINKS_output/');sys.exit()
type = 'genes'
id_type = 'position'
    ################ Command-line arguments ################
import getopt
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','GeneType=', 'IDType='])
for opt, arg in options:
if opt == '--i': input_dir=arg
if opt == '--GeneType': type=arg
if opt == '--IDType': id_type=arg
combineCufflinks(input_dir,type,id_type)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/combineCufflinks.py
|
combineCufflinks.py
|
###cufflinks_import
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import gzip
import getopt
def read_directory(sub_dir):
dir_list = os.listdir(sub_dir)
dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv": dir_list2.append(entry)
return dir_list2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def getFiles(sub_dir):
dir_list = os.listdir(sub_dir)
dir_list2 = []
###Only get folder names
for entry in dir_list:
dir_list2.append(entry)
return dir_list2
def zipDirectory(dir):
#http://www.testingreflections.com/node/view/8173
import zipfile
zip_file = dir+'.zip'
p = string.split(dir,'/'); top=p[-1]
zip = zipfile.ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED)
root_len = len(os.path.abspath(dir))
for root, dirs, files in os.walk(dir):
archive_root = os.path.abspath(root)[root_len:]
for f in files:
fullpath = os.path.join(root, f)
archive_name = os.path.join(top+archive_root, f)
zip.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
zip.close()
return zip_file
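# Usage sketch: zipDirectory('/tmp/results') writes /tmp/results.zip with the
# 'results' folder preserved as the top-level directory inside the archive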
def unzipFiles(filename,dir):
import zipfile
output_filepath = dir+filename
try:
zfile = zipfile.ZipFile(output_filepath)
for name in zfile.namelist():
if name.endswith('/'):null=[] ### Don't need to export
else:
try: outfile = open(dir+name,'w')
except Exception:
outfile = open(dir+name[1:],'w')
outfile.write(zfile.read(name)); outfile.close()
#print 'Zip extracted to:',output_filepath
status = 'completed'
except Exception, e:
try:
### Use the operating system's unzip if all else fails
extracted_path = string.replace(output_filepath,'.zip','')
try: os.remove(extracted_path) ### This is necessary, otherwise the empty file created above will require user authorization to delete
except Exception: null=[]
subprocessUnzip(dir,output_filepath)
status = 'completed'
except IOError:
print e
            print 'WARNING!!!! The zip file',output_filepath,'does not appear to be a valid zip archive file or is corrupt.'
status = 'failed'
return status
def gunzip(gz_filepath, decompressed_filepath):
    ### The original referenced gz_filepath/decompressed_filepath as undefined
    ### globals; they are now explicit parameters
    import gzip; content = gzip.GzipFile(gz_filepath, 'rb')
    data = open(decompressed_filepath,'wb')
    import shutil; shutil.copyfileobj(content,data)
    data.close(); content.close()
def importCufflinksDir(directory):
root_dir_files = getFiles(directory)
global sample_FPKM_db
sample_FPKM_db={}
for sample in root_dir_files: ###
if 'fpkm_tracking' in sample:
x_db,transcript_db = readFPKMs(directory+'/'+sample)
sample_FPKM_db.update(x_db)
### Below occurs if it is a directory of FPKM results with the folder name as the sample
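        ### e.g. directory/sample1/genes.fpkm_tracking (one sub-folder per sample),
        ### in addition to fpkm_tracking files placed directly in `directory` (handled above)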
try:
files = getFiles(directory+'/'+sample)
for file in files:
if 'fpkm_tracking' in file:
x_db,transcript_db = readFPKMs(directory+'/'+sample+'/'+file)
sample_FPKM_db.update(x_db)
except Exception: pass
### Get the samples
samples = sample_FPKM_db.keys()
samples.sort()
### Get the transcripts
gene_fpkm_db={}
for sample in samples:
fpkm_db = sample_FPKM_db[sample]
for gene in fpkm_db:
fpkm = fpkm_db[gene]
try: gene_fpkm_db[gene].append(fpkm)
except Exception: gene_fpkm_db[gene] = [fpkm]
export_object = open(directory+'/Cufflinks.txt','w')
headers = string.join(['UID']+samples,'\t')+'\n'
export_object.write(headers)
for geneID in gene_fpkm_db:
values = map(str,gene_fpkm_db[geneID])
values = string.join([geneID]+values,'\t')+'\n'
export_object.write(values)
export_object.close()
def findFilename(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'//','/') ### If /// present
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[x:]
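# e.g. findFilename('/data/run1/sample1.fpkm_tracking') -> 'sample1.fpkm_tracking'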
def readFPKMs(path):
if '.gz' in path:
f=gzip.open(path,'rb')
else:
f=open(path,"rU")
file_content=f.read()
fpkm_data = string.split(file_content,'\n')
sample = findFilename(path)
if 'fpkm_tracking' in sample:
sample = string.split(sample,'.fpkm_tracking')[0]
sample = string.replace(sample,'.sorted.genes','')
fpkm_db={}
transcript_db={}
firstLine=True
row_count=0
for line in fpkm_data:
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
try:
track_i = t.index('tracking_id')
gene_i = t.index('gene_id')
fpkm_i = t.index('FPKM')
except Exception:
fpkm_i = 9
gene_i = 3
row_count = 1
firstLine = False
if firstLine == False and row_count>0:
if len(t)>1:
geneID = t[gene_i]
transcriptID = t[gene_i]
fpkm = t[fpkm_i]
fpkm_db[transcriptID] = float(fpkm)
transcript_db[transcriptID] = geneID
row_count+=1
sample_FPKM_db[sample] = fpkm_db
return sample_FPKM_db,transcript_db
if __name__ == "__main__":
    ################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a directory of fpkm_tracking file as input in the command-line"
print "Example: python cufflinks_import.py --i /Users/cufflinks/"
sys.exit()
else:
Species = None
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species='])
for opt, arg in options:
            if opt == '--i': dir=arg ### directory of Cufflinks fpkm_tracking results
            elif opt == '--species': Species=arg ### species code (e.g., Hs)
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
try: importCufflinksDir(dir)
except ZeroDivisionError:
print [sys.argv[1:]],'error'; error
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/cufflinks_import.py
|
cufflinks_import.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This script can be run on its own to extract a single BAM file at a time or
indirectly by multiBAMtoBED.py to extract junction.bed files (Tophat format)
from many BAM files in a single directory at once. Currently uses the Tophat
predicted Strand notation opt('XS') for each read. This can be substituted with
strand notations from other aligners (check with the software authors).
This code is compatible with pysam or bamnostic, but is 77x faster with pysam
(12 seconds versus 927 seconds). """
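# Note (assumption): for aligners that record the transcript strand in a tag
# other than 'XS', only the entry.get_tag('XS') call in parseJunctionEntries
# needs to change, e.g. entry.get_tag('TS') for a hypothetical 'TS'-tagging tool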
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import bamnostic as pysam
import copy,getopt
import time
import traceback
try: import export
except Exception: pass
try: import unique
except Exception: pass
def getSpliceSites(cigarList,X):
    cumulative=0
    coordinates=[]
    for (code,seqlen) in cigarList:
        if code == 0:
            cumulative+=seqlen
        if code == 3:
            #if strand == '-':
            five_prime_ss = str(X+cumulative)
            cumulative+=seqlen ### add the intron length
            three_prime_ss = str(X+cumulative+1) ### 3' exon start (prior exon splice-site + intron length)
            coordinates.append([five_prime_ss,three_prime_ss])
            up_to_intron_dist = cumulative
    return coordinates, up_to_intron_dist
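# Worked example (values assumed): a 50M1000N50M read starting at reference
# position 100 yields one junction; the matched bases before the 'N' set the 5'
# splice site, and adding the intron length sets the 3' site:
# getSpliceSites([(0,50),(3,1000),(0,50)], 100) -> ([['150','1151']], 1050)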
def writeJunctionBedFile(junction_db,jid,o):
strandStatus = True
for (chr,jc,tophat_strand) in junction_db:
if tophat_strand==None:
strandStatus = False
break
if strandStatus== False: ### If no strand information in the bam file filter and add known strand data
junction_db2={}
for (chr,jc,tophat_strand) in junction_db:
original_chr = chr
if 'chr' not in chr:
chr = 'chr'+chr
for j in jc:
try:
strand = splicesite_db[chr,j]
junction_db2[(original_chr,jc,strand)]=junction_db[(original_chr,jc,tophat_strand)]
except Exception: pass
junction_db = junction_db2
for (chr,jc,tophat_strand) in junction_db:
x_ls=[]; y_ls=[]; dist_ls=[]
read_count = str(len(junction_db[(chr,jc,tophat_strand)]))
for (X,Y,dist) in junction_db[(chr,jc,tophat_strand)]:
x_ls.append(X); y_ls.append(Y); dist_ls.append(dist)
outlier_start = min(x_ls); outlier_end = max(y_ls); dist = str(max(dist_ls))
        exon_lengths = str(int(jc[0])-outlier_start)+','+str(outlier_end-int(jc[1])+1)
junction_id = 'JUNC'+str(jid)+':'+jc[0]+'-'+jc[1] ### store the unique junction coordinates in the name
output_list = [chr,str(outlier_start),str(outlier_end),junction_id,read_count,tophat_strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
o.write(string.join(output_list,'\t')+'\n')
def writeIsoformFile(isoform_junctions,o):
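    ### Debugging stub: de-duplicates each junction list, then prints the first
    ### '+' strand entry and exits; its only call site in parseJunctionEntries
    ### is commented out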
for coord in isoform_junctions:
isoform_junctions[coord] = unique.unique(isoform_junctions[coord])
if '+' in coord:
print coord, isoform_junctions[coord]
if '+' in coord:
sys.exit()
def verifyFileLength(filename):
count = 0
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>9: break
except Exception: null=[]
return count
def retreiveAllKnownSpliceSites(returnExonRetention=False,DesignatedSpecies=None,path=None):
### Uses a priori strand information when none present
import export, unique
chromosomes_found={}
try: parent_dir = export.findParentDir(bam_file)
except Exception: parent_dir = export.findParentDir(path)
species = None
for file in os.listdir(parent_dir):
if 'AltAnalyze_report' in file and '.log' in file:
log_file = unique.filepath(parent_dir+'/'+file)
log_contents = open(log_file, "rU")
species_tag = ' species: '
for line in log_contents:
line = line.rstrip()
if species_tag in line:
species = string.split(line,species_tag)[1]
if species == None:
try: species = IndicatedSpecies
except Exception: species = DesignatedSpecies
splicesite_db={}
gene_coord_db={}
try:
if ExonReference==None:
exon_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
length = verifyFileLength(exon_dir)
except Exception:
#print traceback.format_exc();sys.exit()
length = 0
if length==0:
exon_dir = ExonReference
refExonCoordinateFile = unique.filepath(exon_dir)
firstLine=True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
spliceEvent = t[-2]
#start = int(start); stop = int(stop)
#geneID = string.split(exon,':')[0]
try:
gene_coord_db[geneID,chr].append(int(start))
gene_coord_db[geneID,chr].append(int(stop))
except Exception:
gene_coord_db[geneID,chr] = [int(start)]
gene_coord_db[geneID,chr].append(int(stop))
if returnExonRetention:
                if 'exclusion' in spliceEvent: ### the original repeated the same test twice
splicesite_db[geneID+':'+exon]=[]
else:
splicesite_db[chr,start]=strand
splicesite_db[chr,stop]=strand
if len(chr)<5 or ('GL0' not in chr and 'GL' not in chr and 'JH' not in chr and 'MG' not in chr):
chromosomes_found[string.replace(chr,'chr','')] = []
for i in gene_coord_db:
gene_coord_db[i].sort()
gene_coord_db[i] = [gene_coord_db[i][0],gene_coord_db[i][-1]]
return splicesite_db,chromosomes_found,gene_coord_db
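# Returned structures (illustrative): splicesite_db[(chr,coordinate)] = strand
# for every known exon boundary, chromosomes_found tracks 'chr'-stripped names,
# and gene_coord_db[(geneID,chr)] = [min_start,max_stop] spans each gene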
def exportIndexes(input_dir):
import unique
bam_dirs = unique.read_directory(input_dir)
print 'Building BAM index files',
for file in bam_dirs:
if string.lower(file[-4:]) == '.bam':
bam_dir = input_dir+'/'+file
bamf = pysam.AlignmentFile(bam_dir, "rb" )
### Is there an indexed .bai for the BAM? Check.
try:
for entry in bamf.fetch():
codes = map(lambda x: x[0],entry.cigar)
break
except Exception:
                ### Build the BAM index
print '.',
bam_dir = str(bam_dir)
#On Windows, this indexing step will fail if the __init__ pysam file line 51 is not set to - catch_stdout = False
pysam.index(bam_dir)
def parseJunctionEntries(bam_dir,multi=False, Species=None, ReferenceDir=None):
global bam_file
global splicesite_db
global IndicatedSpecies
global ExonReference
IndicatedSpecies = Species
ExonReference = ReferenceDir
bam_file = bam_dir
try: splicesite_db,chromosomes_found, gene_coord_db = retreiveAllKnownSpliceSites()
except Exception:
print traceback.format_exc()
splicesite_db={}; chromosomes_found={}
start = time.time()
try: import collections; junction_db=collections.OrderedDict()
except Exception:
try: import ordereddict; junction_db = ordereddict.OrderedDict()
except Exception: junction_db={}
original_junction_db = copy.deepcopy(junction_db)
bam_index = os.path.isfile(bam_dir+'.bai')
if bam_index==False:
if multi == False:
print 'Building BAM index file for', bam_dir
from pysam import index
index(bam_dir)
bamf = pysam.AlignmentFile(bam_dir, "rb" )
chromosome = False
chromosomes={}
bam_reads=0
count=0
jid = 1
prior_jc_start=0
l1 = None; l2=None
o = open (string.replace(bam_dir,'.bam','__junction.bed'),"w")
o.write('track name=junctions description="TopHat junctions"\n')
export_isoform_models = False
if export_isoform_models:
io = open (string.replace(bam_dir,'.bam','__isoforms.txt'),"w")
isoform_junctions = copy.deepcopy(junction_db)
outlier_start = 0; outlier_end = 0; read_count = 0; c=0
for entry in bamf:
bam_reads+=1
cigarstring = entry.cigarstring
if cigarstring != None:
if 'N' in cigarstring: ### Hence a junction
if prior_jc_start == 0: pass
elif (entry.pos-prior_jc_start) > 5000 or entry.reference_name != chromosome: ### New chr or far from prior reads
writeJunctionBedFile(junction_db,jid,o)
#writeIsoformFile(isoform_junctions,io)
junction_db = copy.deepcopy(original_junction_db) ### Re-set this object
jid+=1
chromosome = entry.reference_name
chromosomes[chromosome]=[] ### keep track
X=entry.reference_start
#if entry.query_name == 'SRR791044.33673569':
#print chromosome, entry.pos, entry.reference_length, entry.alen, entry.query_name
Y=entry.reference_start+entry.reference_length
prior_jc_start = X
try: tophat_strand = entry.get_tag('XS') ### TopHat knows which sequences are likely real splice sites so it assigns a real strand to the read
except Exception:
#if multi == False: print 'No TopHat strand information';sys.exit()
tophat_strand = None
coordinates,up_to_intron_dist = getSpliceSites(entry.cigar,X)
#if count > 100: sys.exit()
#print entry.query_name,X, Y, entry.cigarstring, entry.cigar, tophat_strand
for (five_prime_ss,three_prime_ss) in coordinates:
jc = five_prime_ss,three_prime_ss
#print X, Y, jc, entry.cigarstring, entry.cigar
try: junction_db[chromosome,jc,tophat_strand].append([X,Y,up_to_intron_dist])
except Exception: junction_db[chromosome,jc,tophat_strand] = [[X,Y,up_to_intron_dist]]
if export_isoform_models:
try:
mate = bamf.mate(entry) #https://groups.google.com/forum/#!topic/pysam-user-group/9HM6nx_f2CI
if 'N' in mate.cigarstring:
mate_coordinates,mate_up_to_intron_dist = getSpliceSites(mate.cigar,mate.pos)
else: mate_coordinates=[]
except Exception: mate_coordinates=[]
#print coordinates,mate_coordinates
junctions = map(lambda x: tuple(x),coordinates)
if len(mate_coordinates)>0:
try:
isoform_junctions[chromosome,tuple(junctions),tophat_strand].append(mate_coordinates)
except Exception:
isoform_junctions[chromosome,tuple(junctions),tophat_strand] = [mate_coordinates]
else:
if (chromosome,tuple(junctions),tophat_strand) not in isoform_junctions:
isoform_junctions[chromosome,tuple(junctions),tophat_strand] = []
count+=1
writeJunctionBedFile(junction_db,jid,o) ### One last read-out
if multi == False:
print bam_reads, count, time.time()-start, 'seconds required to parse the BAM file'
o.close()
bamf.close()
missing_chromosomes=[]
    for chr in chromosomes_found:
        if chr not in chromosomes:
            ### BAM chromosome names may carry the 'chr' prefix; check both forms
            ### (the original re-tested chromosomes_found here, which never matched)
            if ('chr'+chr) not in chromosomes:
                if chr != 'M' and chr != 'MT':
                    missing_chromosomes.append(chr)
#missing_chromosomes = ['A','B','C','D']
try: bam_file = export.findFilename(bam_file)
except Exception: pass
return bam_file, missing_chromosomes
if __name__ == "__main__":
    ################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a BAM file as input in the command-line"
print "Example: python BAMtoJunctionBED.py --i /Users/me/sample1.bam"
sys.exit()
else:
Species = None
reference_dir = None
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species=','r='])
for opt, arg in options:
if opt == '--i': bam_dir=arg ### full path of a BAM file
elif opt == '--species': Species=arg ### species for STAR analysis to get strand
elif opt == '--r': reference_dir=arg ### An exon.bed reference file (created by AltAnalyze from junctions, multiBAMtoBED.py or other) - required for STAR to get strand if XS field is empty
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
try: parseJunctionEntries(bam_dir,Species=Species,ReferenceDir=reference_dir)
except ZeroDivisionError:
print [sys.argv[1:]],'error'; error
""" Benchmarking notes: On a 2017 MacBook Pro with 16GB of RAM and a local 7GB BAM file (solid drive), 9 minutes (526s) to complete writing a junction.bed.
To simply search through the file without looking at the CIGAR, the script takes close to 5 minutes (303s)"""
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/BAMtoJunctionBED_alt.py
|
BAMtoJunctionBED_alt.py
|
import sys, string
import os.path
import unique
import copy
import time
import math
import export
from xlrd import open_workbook
import traceback
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
""" Methods for reading Excel files """
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def importExcelFile(filename, column=False, top=None):
""" Iterate through each tab to get relevant data """
from xlrd import open_workbook
worksheet_data={}
results = False
wb = open_workbook(filename)
for s in wb.sheets():
worksheet = s.name
rows=[]
count=0
        for row in range(s.nrows):
            count+=1
            if top!=None and count>top:
                break ### only the first `top` rows were requested
            values = []
            if column!=False: ### Return results from a single designated column number
                try: values.append(str(s.cell(row,column-1).value))
                except Exception: pass
            else:
                for col in range(s.ncols):
                    try: values.append(str(s.cell(row,col).value))
                    except Exception: pass
            rows.append(values)
worksheet_data[worksheet]=rows
return worksheet_data
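# Usage sketch (hypothetical file name): collect the first 100 rows of column 2
# from every worksheet:
# worksheet_data = importExcelFile('markers.xls', column=2, top=100)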
def processWorksheetMarkers(worksheet_data):
""" Compile the ID to category results """""
compiled_results=[]
for worksheet in worksheet_data:
firstRow=True
for value in worksheet_data[worksheet]:
if firstRow:
category = value
firstRow = False
else:
compiled_results.append([category,value])
return compiled_results
def exportCompiledResults(output,compiled_results):
export_object = open(output,'w')
for (category,value) in compiled_results:
try: export_object.write(category[0]+'\t'+value[0]+'\n')
except: pass
export_object.close()
def computeSignEnrichmentScore(reference,query,useMaxLength=False,randomize=False):
""" Given the total number of features in the reference signature and
the total in the compared query (e.g., folds, dPSI values), compare the
sign of the feature in the reference and query to compute an enrichment
score """
ref_len = len(reference)
query_len = len(query)
downScore = 0
upScore = 0
for feature in query:
if feature in reference:
ref_sign = reference[feature]
            if randomize:
                import random ### not imported at the top of this script
                ### random.shuffle returns None and cannot be indexed; pick directly
                query_sign = random.choice(['-','+'])
else:
query_sign = query[feature]
if ref_sign == '-':
if query_sign == '-':
downScore+=1
else:
downScore-=1
elif ref_sign == '+':
if query_sign == '+':
upScore+=1
else:
upScore-=1
if useMaxLength:
ref_len = max(ref_len,query_len)
upScore = upScore/(ref_len*1.000)
downScore = downScore/(ref_len*1.000)
Score1 = (upScore+downScore)*0.5
if Score1>0: sign = 1
else: sign = -1
Score1T = sign* math.log(1+(100*abs(Score1)),10)
return Score1T
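# Worked example (toy data): with reference = {'g1':'+','g2':'-','g3':'+'} and
# query = {'g1':'+','g2':'+','g3':'+'}, g1/g3 agree (upScore=2) and g2 disagrees
# (downScore=-1); with ref_len=3, Score1 = (2/3 - 1/3)*0.5 = 1/6 and
# Score1T = log10(1 + 100/6) ~= 1.25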
def computeSignPval(score_db,random_db):
    from scipy.stats import norm
    import numpy ### required for numpy.std below
for (ref,query) in score_db:
Score1T = score_db[(ref,query)]
RScore_stdev = numpy.std(random_db[(ref,query)])
z = (Score1T)/RScore_stdev
p = 2* (1-norm.cdf(z))
stats_db[ref,query] = z,p
return stats_db
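# A minimal driver sketch (assumed workflow; the function name and dictionary
# shapes are illustrative, not from the original): score every reference/query
# signature pair, build a null distribution by re-scoring with randomize=True,
# then derive z-scores and p-values:
def scoreSignaturePairs(ref_db,query_db,iterations=100):
    ### ref_db/query_db map a signature name to a {feature: '+'/'-'} dictionary
    score_db={}; random_db={}
    for ref in ref_db:
        for query in query_db:
            score_db[(ref,query)] = computeSignEnrichmentScore(ref_db[ref],query_db[query])
            nulls=[]
            for i in range(iterations):
                nulls.append(computeSignEnrichmentScore(ref_db[ref],query_db[query],randomize=True))
            random_db[(ref,query)] = nulls
    return computeSignPval(score_db,random_db)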
if __name__ == '__main__':
import getopt
    top = None
    column = False ### default when --column is not supplied (return all columns)
    ################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Insufficient command line flags supplied."
sys.exit()
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['xls=','i=','column=','output=','o=','top='])
for opt, arg in options:
if opt == '--xls' or opt == '--i': xls=arg
if opt == '--output' or opt == '--o': output=arg
if opt == '--top': top=int(arg)
if opt == '--column':
try: column = int(arg)
except: column = arg
worksheet_data = importExcelFile(xls,column=column,top=top)
compiled_results = processWorksheetMarkers(worksheet_data)
exportCompiledResults(output,compiled_results)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/ReadXLS.py
|
ReadXLS.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import itertools
dirfile = unique
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def returnDirectories(sub_dir):
dir_list = unique.returnDirectories(sub_dir)
return dir_list
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if '.' in data:
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term not in data_dir and '.' in data: matches.append(data_dir)
return matches
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineAllLists(files_to_merge,original_filename,includeColumns=False):
headers =[]; all_keys={}; dataset_data={}; files=[]; filter_keys={}
for filename in files_to_merge:
print filename
duplicates=[]
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
if includeColumns==False:
headers.append(file)
#for i in t: headers.append(i+'.'+file); #headers.append(i)
else:
headers.append(t[includeColumns]+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
try: key = t[0]+':'+t[1]+' '+t[3]+'|'+t[5]; t = [key,t[2]]
except Exception: print key;sys.exit()
if includeColumns==False:
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t;kill
else:
values = [t[includeColumns]]
#key = string.replace(key,' ','')
if len(key)>0 and key != ' ' and key not in combined_data: ### When the same key is present in the same dataset more than once
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
v = float(values[0])
if permform_all_pairwise == 'yes':
try: combined_data[key].append(values); duplicates.append(key)
except Exception: combined_data[key] = [values]
else:
combined_data[key] = values
if float(t[1])>0.15: filter_keys[key]=[]
#print duplicates
dataset_data[filename] = combined_data
for i in dataset_data:
print len(dataset_data[i]), i
print 'filter_keys',len(filter_keys)
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data:
number_of_values = len(combined_data[key][0]); break
while i<number_of_values: null_values.append('0'); i+=1
        for key in all_keys:
            include = 'no' ### only keys passing the filter are exported (the original left `include` stale here)
            if key in filter_keys:
                include = 'yes'
                if combine_type == 'intersection':
                    if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
                    else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError:
values = null_values
if permform_all_pairwise == 'yes':
values = [null_values]
if permform_all_pairwise == 'yes':
try:
val_list = combined_file_data[key]
val_list.append(values)
combined_file_data[key] = val_list
except KeyError: combined_file_data[key] = [values]
else:
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedVariants.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
if permform_all_pairwise == 'yes':
results = getAllListCombinations(combined_file_data[key])
for result in results:
merged=[]
for i in result: merged+=i
merged2 = map(float,merged)
if max(merged2)>1:
merged=[]
max_val = max(merged2)
for i in merged2:
try: i = i/max_val
except Exception: pass
merged.append(str(i))
values = string.join([key]+merged,'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
else:
try:
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
except Exception: print combined_file_data[key];sys.exit()
data.close()
print "exported",len(dataset_data),"to",export_file
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def getAllListCombinationsLong(a):
    ### e.g. a = (['a1','a2','a3'],['b1','b2','b3'],['c1','c2','c3'])
    list_len_db={}
    for ls in a:
        list_len_db[len(ls)]=[] ### the original indexed an undefined variable (x) and then exited unconditionally
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r+=i
else:
#http://code.activestate.com/recipes/496807-list-of-all-combination-from-multiple-lists/
r=[[]]
for x in a:
t = []
for y in x:
for i in r:
t.append(i+[y])
r = t
return r
def combineUniqueAllLists(files_to_merge,original_filename):
headers =[]; all_keys={}; dataset_data={}; files=[]
for filename in files_to_merge:
print filename
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
for i in t:
headers.append(i+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t;kill
#key = string.replace(key,' ','')
combined_data[key] = values
if len(key)>0 and key != ' ':
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
dataset_data[filename] = combined_data
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError: values = null_values
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
data.close()
print "exported",len(dataset_data),"to",export_file
def getAllListCombinations(a):
#http://www.saltycrane.com/blog/2011/11/find-all-combinations-set-lists-itertoolsproduct/
""" Nice code to get all combinations of lists like in the above example, where each element from each list is represented only once """
list_len_db={}
for x in a:
list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r.append(i[0])
return [r]
else:
return list(itertools.product(*a))
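# e.g. getAllListCombinations([['a1','a2'],['b1','b2']]) ->
# [('a1','b1'),('a1','b2'),('a2','b1'),('a2','b2')]; when every input list has
# length one the lists are simply flattened, e.g. [['a1'],['b1']] -> [['a1','b1']]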
def joinFiles(files_to_merge,CombineType,unique_join,outputDir):
""" Join multiple files into a single output file """
global combine_type
global permform_all_pairwise
global output_dir
output_dir = outputDir
combine_type = string.lower(CombineType)
permform_all_pairwise = 'yes'
print 'combine type:',combine_type
print 'join type:', unique_join
#g = GrabFiles(); g.setdirectory(import_dir)
#files_to_merge = g.searchdirectory('xyz') ###made this a term to excluded
if unique_join:
combineUniqueAllLists(files_to_merge,'')
else:
combineAllLists(files_to_merge,'')
return output_dir+'/MergedFiles.txt'
if __name__ == '__main__':
dirfile = unique
includeColumns=-2
includeColumns = False
output_dir = filepath('output')
combine_type = 'union'
permform_all_pairwise = 'yes'
print "Analysis Mode:"
print "1) Batch Analysis"
print "2) Single Output"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": batch_mode = 'yes'
elif inp == "2": batch_mode = 'no'
print "Combine Lists Using:"
print "1) Grab Union"
print "2) Grab Intersection"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": combine_type = 'union'
elif inp == "2": combine_type = 'intersection'
if batch_mode == 'yes': import_dir = '/batch/general_input'
else: import_dir = '/input'
g = GrabFiles(); g.setdirectory(import_dir)
files_to_merge = g.searchdirectory('xyz') ###made this a term to excluded
if batch_mode == 'yes':
second_import_dir = '/batch/primary_input'
g = GrabFiles(); g.setdirectory(second_import_dir)
files_to_merge2 = g.searchdirectory('xyz') ###made this a term to excluded
for file in files_to_merge2:
temp_files_to_merge = customLSDeepCopy(files_to_merge)
original_filename = string.split(file,'/'); original_filename = original_filename[-1]
temp_files_to_merge.append(file)
if '.' in file:
combineAllLists(temp_files_to_merge,original_filename)
else:
combineAllLists(files_to_merge,'',includeColumns=includeColumns)
print "Finished combining lists. Select return/enter to exit"; inp = sys.stdin.readline()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/mergeFilesVariant.py
|
mergeFilesVariant.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import time
import export
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for file in dir_list:
if '.txt' in file: dir_list2.append(file)
return dir_list2
################# Begin Analysis
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def importAnnotations(filename):
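    ### Debugging helper: prints the first rows of the annotation file and then
    ### exits the process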
firstLine = True
fn = filepath(filename)
rows = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
tab_delimited_data = string.split(data,'\t')
if rows > 10: sys.exit()
print tab_delimited_data#;sys.exit()
rows+=1
def correlateMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1):
    ### Takes a filtered pre-processed beta-value file as input
    ### Fix: the original wrote to export_object/annot_export_object without ever
    ### opening them; both are created here following the pattern used by
    ### importMethylationData below (the '-correlated.txt' suffix is a choice made
    ### during this fix, not from the original)
    annot_file = filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt')
    export_object = open(filename[:-4]+'-correlated.txt','w')
    firstLine = True
    rows=0; filtered=0
    for line in open(filename,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,'\t')
        if firstLine:
            header = t
            if len(t)>5 and 'Illumina_name' in header:
                delimiter = -50
                annot_export_object = open(annot_file,'w')
                annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
            else:
                delimiter = len(header)
            headers = t[1:delimiter]
            firstLine = False
            export_object.write(string.join([t[0]]+headers,'\t')+'\n')
else:
probeID = t[0]
#try: beta_values = map(float,t[1:50])
beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
if '' in beta_values:
print beta_values;sys.exit()
high = sum(betaHighCount(x,betaHigh) for x in beta_values)
low = sum(betaLowCount(x,betaLow) for x in beta_values)
def importMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1, filter=None):
annot_file = filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt')
export_object = open(filename[:-4]+'-filtered.txt','w')
print filename[:-4]+'-filtered.txt', counts
firstLine = True
rows=0; filtered=0
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,'\t')
#export_object.write(string.join(t,'\t')+'\n')
#"""
if firstLine:
header = t
if len(t)>5 and 'Illumina_name' in header:
delimiter = -50
annot_export_object = open(annot_file,'w')
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
else:
delimiter = len(header)
headers = t[1:delimiter]
firstLine = False
export_object.write(string.join([t[0]]+headers,'\t')+'\n')
else:
probeID = t[0]
#try: beta_values = map(float,t[1:50])
beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
if '' in beta_values:
print beta_values;sys.exit()
high = sum(betaHighCount(x,betaHigh) for x in beta_values)
low = sum(betaLowCount(x,betaLow) for x in beta_values)
#if rows<50: print high, low, max(beta_values), min(beta_values)
#else:sys.exit()
#export_object.write(string.join(t[:delimiter])+'\n')
if high>=counts and low>=counts:
#if (high-low) > 0.2:
#if rows<50: print 1
if filter!=None:
if probeID in filter: proceed=True; probeID = str(filter[probeID])+':'+probeID
else: proceed = False
else: proceed = True
if proceed:
filtered+=1
export_object.write(string.join([probeID]+map(str,beta_values),'\t')+'\n')
if 'Illumina_name' in header:
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
rows+=1
#"""
export_object.close()
    if delimiter == -50: ### was compared to the string '-50', which could never match
annot_export_object.close()
print filtered, rows
def conFloat(x,betaValues):
try: x = float(x)
except Exception: x=None
if x== None or x == 0:
floats=[]
for i in betaValues:
if i=='': pass
elif float(i)==0: pass
else: floats.append(float(i))
try: return min(floats)
except Exception: print betaValues;sys.exit()
else:
return x
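# e.g. conFloat('', ['0.2','','0.5']) -> 0.2: missing or zero beta-values fall
# back to the smallest non-zero value observed in the row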
def betaHighCount(x,betaHigh):
if x>betaHigh:
return 1
else: return 0
def betaLowCount(x,betaLow):
if x<betaLow:
return 1
else: return 0
def getIDsFromFile(filename):
filterIDs = {}
fn = filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,'\t')
filterIDs[string.lower(t[0])]=[]
return filterIDs
def getRegionType(filename,featureType=None,chromosome=None,filterIDs=None):
if filterIDs !=None:
filterIDs = getIDsFromFile(filterIDs)
firstLine = True
fn = filepath(filename)
count=0; filter_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,',')
if firstLine:
if len(t[2]) >0:
header = t
firstLine=False
chr_ind = header.index('CHR')
pos_ind = header.index('Coordinate_36')
tss_ind = header.index('UCSC_RefGene_Group')
gene_name = header.index('UCSC_RefGene_Name')
else:
probeID = t[0]
count+=1
try: gene_names = string.split(t[gene_name],';')
except Exception: gene_names = []
try:
if chromosome != None:
if t[chr_ind] == chromosome:
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
else:
filter_db[probeID]=t[pos_ind]
if 'promoter' in string.lower(featureType):
if 'TSS' in t[tss_ind]:
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
else:
filter_db[probeID]=t[pos_ind]
if 'mir' in string.lower(featureType) or 'micro' in string.lower(featureType):
if 'mir' in string.lower(t[gene_name]) or 'let' in string.lower(t[gene_name]):
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
else:
filter_db[probeID]=t[pos_ind]
if filterIDs !=None:
for gene in gene_names:
if string.lower(gene) in filterIDs:
filter_db[probeID]=t[pos_ind]
except Exception:
pass
print len(filter_db), 'probes remaining'
return filter_db
if __name__ == '__main__':
import getopt
featureType = 'promoter'
featureType = 'all'
Species = 'Hs'
filter_db=None
chromosome=None
numRegulated = -1
analysis = 'filter'
filterIDs = None
    ################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a methylation beta-value file as input in the command-line"
print "Example: python methylation.py --i /Users/me/sample1.txt --g /Users/me/human.gtf"
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','a=','t=','r=','c=','f='])
for opt, arg in options:
if opt == '--i': input_file=arg
elif opt == '--a': analysis=arg
elif opt == '--t': featureType=arg
elif opt == '--r': numRegulated=int(arg)
elif opt == '--c': chromosome=arg
elif opt == '--f': filterIDs=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if analysis == 'filter':
filename = 'AltDatabase/ucsc/Hs/wgEncodeHaibMethyl450CpgIslandDetails.txt'
#input_file = '/Volumes/SEQ-DATA/PCBC/Methylation/Methylome70allBValues_aronowAnnotations.txt'
if featureType!= 'all' or chromosome != None or filterIDs!=None:
filter_db = getRegionType(filename,featureType=featureType,chromosome=chromosome,filterIDs=filterIDs)
importMethylationData(input_file,filter = filter_db,counts=numRegulated); sys.exit()
#importAnnotations(methylation_file);sys.exit()
if analysis == 'correlate':
### Performs all pairwise correlations between probes corresponding to a gene
correlateMethylationData(input_file)
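    ### Example invocation (hypothetical paths), filtering promoter probes on one chromosome
    ### with at least 3 hyper- and 3 hypo-methylated samples required per probe:
    ### python methylation.py --i /Users/me/betas.txt --a filter --t promoter --c 21 --r 3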
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/methylation.py | methylation.py |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
from stats_scripts import statistics
import unique
import export
dirfile = unique
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for entry in dir_list:
#if entry[-4:] == ".txt" or entry[-4:] == ".all" or entry[-5:] == ".data" or entry[-3:] == ".fa":
dir_list2.append(entry)
return dir_list2
def returnDirectories(sub_dir):
dir=os.path.dirname(dirfile.__file__)
dir_list = os.listdir(dir + sub_dir)
###Below code used to prevent FILE names from being included
dir_list2 = []
for entry in dir_list:
if "." not in entry: dir_list2.append(entry)
return dir_list2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
        if search_term not in data_dir: matches.append(data_dir) ### note: search_term is an exclusion filter, not a match
return matches
############ Result Export Functions #############
def outputSummaryResults(summary_results_db,name,analysis_method,root_dir):
#summary_results_db[dataset_name] = udI,udI-up_diff,ddI,ddI-down_diff,udI_mx,udI_mx-mx_diff,up_dI_genes,down_gene, annotation_list
annotation_db = {}
for dataset in summary_results_db:
for entry in summary_results_db[dataset][-1]:
annotation = entry[0]
count = entry[1]
if 'AA:' not in annotation:
try: annotation_db[annotation].append((dataset,count))
except KeyError: annotation_db[annotation] = [(dataset,count)]
annotation_ls = []
for annotation in annotation_db: annotation_ls.append(annotation)
annotation_ls.sort()
annotation_db2={}
for annotation in annotation_ls:
for dataset in summary_results_db:
y=0
for entry in summary_results_db[dataset][-1]:
annotation2 = entry[0]
count = entry[1]
if annotation2 == annotation:
y=1; new_count = count
if y == 1:
try: annotation_db2[dataset].append((annotation,new_count))
except KeyError: annotation_db2[dataset] = [(annotation,new_count)]
else:
try: annotation_db2[dataset].append((annotation,0))
except KeyError: annotation_db2[dataset] = [(annotation,0)]
summary_output = root_dir+'AltResults/AlternativeOutput/'+analysis_method+'-summary-results'+name+'.txt'
fn=filepath(summary_output)
data = export.createExportFile(summary_output,'AltResults/AlternativeOutput')
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA':
event_type1 = 'inclusion-events'; event_type2 = 'exclusion-events'; event_type3 = 'alternative-exons'
else:
        event_type1 = 'inclusion-events'; event_type2 = 'exclusion-events'; event_type3 = 'mutually-exclusive-events'
title = 'Dataset-name' +'\t'+ event_type1+'\t'+event_type2 +'\t'+ event_type3 +'\t'+ 'up-deltaI-genes' +'\t'+ 'down-deltaI-genes' +'\t'+ 'total-'+analysis_method+'-genes'
title = title +'\t' + 'upregulated_genes' +'\t'+ 'downregulated_genes' +'\t'+ analysis_method+'-genes-differentially-exp'+'\t'+ 'RNA_processing/binding-factors-upregulated' +'\t'+ 'RNA_processing/binding-factors-downregulated' +'\t'+ analysis_method+'_RNA_processing/binding-factors'
title = title +'\t'+ 'avg-downregulated-peptide-length' +'\t'+ 'std-downregulated-peptide-length' +'\t'+ 'avg-upregulated-peptide-length' +'\t'+ 'std-upregulated-peptide-length' +'\t'+ 'ttest-peptide-length' +'\t'+ 'median-peptide-length-fold-change'
for entry in annotation_ls: title = title +'\t'+ entry
data.write(title+'\n')
for dataset in summary_results_db:
values = dataset
for entry in summary_results_db[dataset][0:-1]: values = values +'\t'+ str(entry)
if dataset in annotation_db2:
for entry in annotation_db2[dataset]: values = values +'\t'+ str(entry[1])
data.write(values+'\n')
data.close()
def compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,analyzing_genes,analysis_method,array_type,root_dir):
aspire_gene_db = {}; aspire_event_db = {}; event_annotation_db = {}; dataset_name_list = []
#annotate_db[affygene] = name, symbol,ll_id,splicing_annotation
include_all_other_genes = 'yes'
for filename in aspire_output_list:
x = 0
fn=filepath(filename)
if '\\' in filename: names = string.split(filename,'\\') #grab file name
else: names = string.split(filename,'/')
try: names = string.split(names[-1],'-'+analysis_method)
        except ValueError: print names;kill ### 'kill' is undefined: intentionally raises NameError to halt with context
name = names[0]
dataset_name_list.append(name)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.split(data,'\t') #remove endline
y = 0
if x == 0: x=1
else:
if analyzing_genes == 'no':
if (array_type == 'exon' or array_type == 'gene') and analysis_method in filename:
lowest_pvalue = float(data[8]);
try: si_p = float(data[20])
except Exception: si_p = 1
try: midas_p = float(data[9])
except ValueError: midas_p = 0
#print si_p,midas_p;kill
#if lowest_pvalue < 0.05:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[2]; description = data[3]
exon_set1 = data[4]; exon_set2 = ''
event_call = data[27]; functional_attribute = data[14]
uniprot_attribute = data[15]; gene_expression_change = data[22]
dI = dI*(-1)
elif analysis_method in filename:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[2]; description = data[3]
exon_set1 = data[4]+'('+data[8]+')'; exon_set2 = data[5]+'('+data[10]+')'
event_call = data[27]; functional_attribute = data[14]
uniprot_attribute = data[15]; gene_expression_change = data[22]
if analysis_method == 'linearregres' or analysis_method == 'ASPIRE':
functional_attribute = data[19]; uniprot_attribute = data[20]
#print exon_set1, exon_set2, data[:5];kill
else:
if (array_type == 'exon' or array_type == 'gene') and analysis_method in filename:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[3]; description = data[5]
dI_direction = data[6]; locus_link = affygene
exon_set1 = ''; exon_set2 = ''
event_call = data[-4]; functional_attribute = data[-9]
uniprot_attribute = data[-8]; gene_expression_change = data[-5]
if dI_direction == 'upregulated': dI = dI*(-1)
elif analysis_method in filename:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[3]; description = data[5]
dI_direction = data[6]; locus_link = data[4]
exon_set1 = ''; exon_set2 = ''
event_call = data[-4]; functional_attribute = data[-9]
uniprot_attribute = data[-8]; gene_expression_change = data[-5]
if dI_direction == 'downregulated': dI = dI*(-1)
#print affygene,data[-10:];kill
if y == 1:
data_tuple = [name,functional_attribute,uniprot_attribute,gene_expression_change,dI]
try: aspire_event_db[affygene,exon_set1,exon_set2].append(data_tuple)
except KeyError: aspire_event_db[affygene,exon_set1,exon_set2] = [data_tuple]
event_annotation_db[affygene,exon_set1,exon_set2] = event_call,symbol,description
aspire_event_db2 = {}; splice_gene_db = {}; dataset_name_list.sort()
for name in dataset_name_list:
for key in event_annotation_db:
###record all genes in the event_annotation_db
splice_gene_db[key[0]] = key[0]
if key in aspire_event_db:
x = 0
for entry in aspire_event_db[key]:
if entry[0] == name:
x = 1
dI = entry[1],entry[2],entry[3],entry[4]
try: aspire_event_db2[key].append(dI)
except KeyError: aspire_event_db2[key] = [dI]
if x ==0:
try: aspire_event_db2[key].append(('','','',0))
except KeyError: aspire_event_db2[key] = [('','','',0)]
else:
try: aspire_event_db2[key].append(('','','',0))
except KeyError: aspire_event_db2[key] = [('','','',0)]
for key in aspire_event_db2:
dataset_size = len(aspire_event_db2[key])
break
###Add all other Affygene's
temp=[]; x = 0
while x < dataset_size:
temp.append(('','','',0))
x +=1
for affygene in annotate_db:
if affygene not in splice_gene_db:
aspire_event_db2[affygene,'',''] = temp
if include_all_other_genes == 'yes': analysis_method+= '-all-genes'
if analyzing_genes == 'no': summary_output = root_dir+'AltResults/AlternativeOutput/'+analysis_method+'-comparisons-events.txt'
else: summary_output = root_dir+'AltResults/AlternativeOutput/'+analysis_method+'-'+ 'GENE-' +'comparisons-events.txt'
fn=filepath(summary_output)
data = open(fn,'w')
title = 'GeneID' +'\t'+ 'symbol'+'\t'+'description' +'\t'+ 'exon_set1' +'\t'+ 'exon_set2' +'\t'+ 'event_call' +'\t'+ 'splicing_factor_call'
for entry in dataset_name_list:
title = title +'\t'+ entry + '-functional-attribute' +'\t'+ entry + '-uniprot-attribute' +'\t'+ entry +'-GE-change' +'\t'+ entry +'-dI'
data.write(title +'\t'+ 'common-hits' + '\n')
for key in aspire_event_db2:
affygene = key[0]; exon_set1 = key[1]; exon_set2 = key[2]
if affygene in annotate_db: splicing_factor_call = annotate_db[affygene].RNAProcessing()
else: splicing_factor_call = ''
try:
event_call = event_annotation_db[key][0]
symbol = event_annotation_db[key][1]
description = event_annotation_db[key][2]
except KeyError:
event_call = ''; symbol = ''; description = ''
values = affygene +'\t'+ symbol +'\t'+ description +'\t'+ exon_set1 +'\t'+ exon_set2 +'\t'+ event_call +'\t'+ splicing_factor_call
x=0
for entry in aspire_event_db2[key]:
for info in entry: values = values +'\t'+ str(info)
if entry[-1] != 0: x +=1
values = values +'\t'+ str(x) + '\n'
if include_all_other_genes == 'no':
if x>0: data.write(values)
else: data.write(values)
data.close()
def exportTransitResults(array_group_list,array_raw_group_values,array_group_db,avg_const_exp_db,adj_fold_dbase,exon_db,dataset_name,apt_dir):
"""Export processed raw expression values (e.g. add global fudge factor or eliminate probe sets based on filters) to txt files
for analysis with MiDAS"""
#array_group_list contains group names in order of analysis
#array_raw_group_values contains expression values for the x number of groups in above list
#array_group_db key is the group name and values are the list of array names
#avg_const_exp_db contains the average expression values for all arrays for all constitutive probesets, with gene as the key
ordered_array_header_list=[]
for group in array_group_list: ###contains the correct order for each group
for array_id in array_group_db[group]:
ordered_array_header_list.append(str(array_id))
ordered_exp_val_db = {} ###new dictionary containing all expression values together, but organized based on group
probeset_affygene_db = {} ###lists all altsplice probesets and corresponding affygenes
for probeset in array_raw_group_values:
try:
include_probeset = 'yes'
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
if probeset in adj_fold_dbase: ###indicates that this probeset is analyzed for splicing (e.g. has a constitutive probeset)
for group_val_list in array_raw_group_values[probeset]:
non_log_group_exp_vals = statistics.log_fold_conversion(group_val_list)
for val in non_log_group_exp_vals:
try: ordered_exp_val_db[probeset].append(str(val))
except KeyError: ordered_exp_val_db[probeset] = [str(val)]
affygene = exon_db[probeset].GeneID()
try: probeset_affygene_db[affygene].append(probeset)
except KeyError: probeset_affygene_db[affygene] = [probeset]
except KeyError:
###Indicates that the expression dataset file was not filtered for whether annotations exist in the exon annotation file
###In that case, just ignore the entry
null = ''
gene_count = 0
ordered_gene_val_db={}
    for affygene in avg_const_exp_db: ### now add all constitutive gene-level expression values (only for analyzed genes)
if affygene in probeset_affygene_db: ###ensures we only include gene data where there are altsplice examined probesets
non_log_ordered_exp_const_val = statistics.log_fold_conversion(avg_const_exp_db[affygene])
gene_count+=1
for val in non_log_ordered_exp_const_val:
try: ordered_gene_val_db[affygene].append(str(val))
except KeyError: ordered_gene_val_db[affygene] = [str(val)]
convert_probesets_to_numbers={}
convert_affygene_to_numbers={}; array_type = 'junction'
probeset_affygene_number_db={}; x=0; y=0
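    ### Numbering sketch (hypothetical): for two genes with 2 and 1 probesets,
    ### gene1 -> 1 with probesets 2,3; gene2 -> 4 with probeset 5. Genes and
    ### probesets share one numeric namespace so the MiDAS IDs never collide.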
for affygene in probeset_affygene_db:
        x+=1; y = x ### each affygene gets a number unique across genes and probesets; its probesets count up from that number
x_copy = x
example_gene_probeset = probeset_affygene_db[affygene][0]
#if exon_db[example_gene_probeset].ArrayType() == 'exon': x_copy = exon_db[example_gene_probeset].SecondaryGeneID()
if x_copy not in exon_db:
convert_affygene_to_numbers[affygene] = str(x_copy)
else: print affygene, x_copy,'new numeric for MIDAS already exists as a probeset ID number'; kill
for probeset in probeset_affygene_db[affygene]:
y = y+1; y_copy = y
if exon_db[probeset].ArrayType() == 'exon':
y_copy = probeset ### Only appropriate when the probeset ID is a number
array_type = 'exon'
convert_probesets_to_numbers[probeset] = str(y_copy)
try: probeset_affygene_number_db[str(x_copy)].append(str(y_copy))
except KeyError: probeset_affygene_number_db[str(x_copy)] = [str(y_copy)]
x=y
metafile = 'AltResults/MIDAS/meta-'+dataset_name[0:-1]+'.txt'
data1 = export.createExportFile(metafile,'AltResults/MIDAS')
title = 'probeset_id\ttranscript_cluster_id\tprobeset_list\tprobe_count\n'
data1.write(title)
for affygene in probeset_affygene_number_db:
probeset_list = probeset_affygene_number_db[affygene]; probe_number = str(len(probeset_list)*6)
probeset_list = [string.join(probeset_list,' ')]
probeset_list.append(affygene); probeset_list.append(affygene); probeset_list.reverse(); probeset_list.append(probe_number)
probeset_list = string.join(probeset_list,'\t'); probeset_list=probeset_list+'\n'
data1.write(probeset_list)
data1.close()
junction_exp_file = 'AltResults/MIDAS/'+array_type+'-exp-'+dataset_name[0:-1]+'.txt'
fn2=filepath(junction_exp_file)
data2 = open(fn2,'w')
ordered_array_header_list.reverse(); ordered_array_header_list.append('probeset_id'); ordered_array_header_list.reverse()
title = string.join(ordered_array_header_list,'\t')
data2.write(title+'\n')
for probeset in ordered_exp_val_db:
probeset_number = convert_probesets_to_numbers[probeset]
exp_values = ordered_exp_val_db[probeset]; exp_values.reverse(); exp_values.append(probeset_number); exp_values.reverse()
exp_values = string.join(exp_values,'\t'); exp_values = exp_values +'\n'
data2.write(exp_values)
data2.close()
gene_exp_file = 'AltResults/MIDAS/gene-exp-'+dataset_name[0:-1]+'.txt'
fn3=filepath(gene_exp_file)
data3 = open(fn3,'w')
title = string.join(ordered_array_header_list,'\t')
data3.write(title+'\n')
for affygene in ordered_gene_val_db:
try: affygene_number = convert_affygene_to_numbers[affygene]
except KeyError: print len(convert_affygene_to_numbers), len(ordered_gene_val_db); kill
exp_values = ordered_gene_val_db[affygene]; exp_values.reverse(); exp_values.append(affygene_number); exp_values.reverse()
exp_values = string.join(exp_values,'\t'); exp_values = exp_values +'\n'
data3.write(exp_values)
data3.close()
exportMiDASArrayNames(array_group_list,array_group_db,dataset_name,'new')
    conversion_file = 'AltResults/MIDAS/probeset-conversion-'+dataset_name[0:-1]+'.txt'
    fn5=filepath(conversion_file)
data5 = open(fn5,'w')
title = 'probeset\tprobeset_number\n'; data5.write(title)
for probeset in convert_probesets_to_numbers: ###contains the correct order for each group
probeset_number = convert_probesets_to_numbers[probeset]
values = probeset+'\t'+probeset_number+'\n'
data5.write(values)
data5.close()
"""
### This code is obsolete... used before AltAnalyze could connect to APT directly.
commands = 'AltResults/MIDAS/commands-'+dataset_name[0:-1]+'.txt'
data = export.createExportFile(commands,'AltResults/MIDAS')
path = filepath('AltResults/MIDAS'); path = string.replace(path,'\\','/'); path = 'cd '+path+'\n\n'
metafile = 'meta-'+dataset_name[0:-1]+'.txt'
junction_exp_file = array_type+'-exp-'+dataset_name[0:-1]+'.txt'
gene_exp_file = 'gene-exp-'+dataset_name[0:-1]+'.txt'
celfiles = 'celfiles-'+dataset_name[0:-1]+'.txt'
command_line = 'apt-midas -c '+celfiles+' -g '+gene_exp_file+' -e '+junction_exp_file+' -m '+metafile+' -o '+dataset_name[0:-1]+'-output'
data.write(path); data.write(command_line); data.close()
"""
status = runMiDAS(apt_dir,array_type,dataset_name,array_group_list,array_group_db)
return status
def exportMiDASArrayNames(array_group_list,array_group_db,dataset_name,type):
celfiles = 'AltResults/MIDAS/celfiles-'+dataset_name[0:-1]+'.txt'
fn4=filepath(celfiles)
data4 = open(fn4,'w')
if type == 'old': cel_files = 'cel_file'
elif type == 'new': cel_files = 'cel_files'
title = cel_files+'\tgroup_id\n'; data4.write(title)
for group in array_group_list: ###contains the correct order for each group
for array_id in array_group_db[group]:
values = str(array_id) +'\t'+ str(group) +'\n'
data4.write(values)
data4.close()
def getAPTDir(apt_fp):
###Determine if APT has been set to the right directory and add the analysis_type to the filename
    if 'bin' not in apt_fp: ### This directory contains the C++ programs that we wish to call
if 'apt' in apt_fp: ###This directory is the parent directory to 'bin'
apt_fp = apt_fp+'/'+'bin'
elif 'Affymetrix Power Tools' in apt_fp: ###The user selected the parent directory
dir_list = read_directory(apt_fp); versions = [] ###See what folders are in this directory (e.g., specific APT versions)
for folder in dir_list: ###If there are multiple versions
if 'apt' in folder:
version = string.replace(folder,'apt-','')
version = string.split(version,'.'); version_int_list = []
try:
for val in version: version_int_list.append(int(val))
versions.append([version_int_list,folder])
except Exception:
versions.append([folder,folder]) ### arbitrarily choose an APT version if multiple exist and the folder name does not conform to the above
if len(versions)>0:
###By making the versions indexed by the integer list value of the version, we can grab the latest version
versions.sort(); apt_fp = apt_fp+'/'+versions[-1][1]+'/'+'bin' ###Add the full path to the most recent version
return apt_fp
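### Sorting on the parsed integer lists (not the raw folder names) matters:
### as plain strings, a hypothetical 'apt-1.8.5' would sort after 'apt-1.12.0',
### but [1,8,5] < [1,12,0] correctly selects the newer build via versions[-1].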
def runMiDAS(apt_dir,array_type,dataset_name,array_group_list,array_group_db):
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-midas' ### if the user selects an APT directory
elif os.name == 'nt':
import platform
if '32bit' in platform.architecture(): apt_file = apt_dir + '/PC/32bit/apt-midas'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/PC/64bit/apt-midas'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-midas'
elif 'linux' in sys.platform:
import platform
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-midas'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-midas'
apt_file = filepath(apt_file)
### Each input file for MiDAS requires a full file path, so get the parent path
midas_input_dir = 'AltResults/MIDAS/'
path=filepath(midas_input_dir)
    ### Remotely connect to the previously verified APT C++ midas program and run the analysis
metafile = path + 'meta-'+dataset_name[0:-1]+'.txt'
exon_or_junction_file = path + array_type+'-exp-'+dataset_name[0:-1]+'.txt'
gene_exp_file = path + 'gene-exp-'+dataset_name[0:-1]+'.txt'
celfiles = path + 'celfiles-'+dataset_name[0:-1]+'.txt'
output_file = path + dataset_name[0:-1]+'-output'
### Delete the output folder if it already exists (may cause APT problems)
delete_status = export.deleteFolder(output_file)
try:
import subprocess
retcode = subprocess.call([
apt_file, "--cel-files", celfiles,"-g", gene_exp_file, "-e", exon_or_junction_file,
"-m", metafile, "-o", output_file])
if retcode: status = 'failed'
else: status = 'run'
except NameError: status = 'failed'
if status == 'failed':
try:
### Try running the analysis with old MiDAS file headers and command
exportMiDASArrayNames(array_group_list,array_group_db,dataset_name,'old')
import subprocess
retcode = subprocess.call([
apt_file, "-c", celfiles,"-g", gene_exp_file, "-e", exon_or_junction_file,
"-m", metafile, "-o", output_file])
if retcode: status = 'failed'
else: status = 'run'
except Exception: status = 'failed'
if status == 'failed': print "apt-midas failed"
else: print "apt-midas run successfully"
return status
def importMidasOutput(dataset_name):
    conversion_file = 'AltResults/MIDAS/probeset-conversion-'+dataset_name[0:-1]+'.txt'
    #print "Looking for", conversion_file
    fn=filepath(conversion_file); x=0; probeset_conversion_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1 ###Occurs for the header line
else:
probeset,probeset_number = string.split(data,'\t')
probeset_conversion_db[probeset_number] = probeset
midas_results = 'AltResults/MIDAS/'+dataset_name[:-1]+'-output'+'/midas.pvalues.txt'
fn=filepath(midas_results); x=0; midas_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if data[0] == '#': continue
elif x==0: x=1 ###Occurs for the header line
else:
t = string.split(data,'\t')
try: probeset_number,geneid,p = t
except ValueError: print t;kill
try: p = float(p)
            except ValueError: p = 1.000 ### "-1.#IND" can occur when the constitutive and probeset values are the same
probeset = probeset_conversion_db[probeset_number]
midas_db[probeset] = p
return midas_db
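### A minimal sketch (not called anywhere in this module) of how the returned
### midas_db might be used downstream: retain probesets meeting a MiDAS p-value
### cutoff. The 0.05 threshold is a hypothetical example, not a pipeline value.
def _example_filterByMidas(midas_db, cutoff=0.05):
    return dict((probeset, p) for (probeset, p) in midas_db.items() if p < cutoff)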
def combineRawSpliceResults(species,analysis_method):
import_dir = '/AltResults/RawSpliceData/'+species+'/'+analysis_method
g = GrabFiles(); g.setdirectory(import_dir)
    files_to_merge = g.searchdirectory('combined') ### 'combined' is an exclusion term: files containing it are skipped (see getDirectoryFiles)
headers =[]; combined_data={}
for filename in files_to_merge:
fn=filepath(filename); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
headers += t[1:]
x=1 ###Occurs for the header line
else:
values = t; values = values[1:]; key = t[0]
try: combined_data[key]+=values
except KeyError: combined_data[key]=values
    max_len=0 ### Some files will contain data that others won't... normalize for this, so we only include rows with data in all files examined
for key in combined_data:
if len(combined_data[key])>max_len: max_len = len(combined_data[key])
combined_data2 = {}; k=0; j=0
for key in combined_data:
        #print combined_data[key];kill
        ### If '1.0' appears in the list, only one constitutive probeset was 'expressed' in that dataset: such comparisons are not likely valid
        count = combined_data[key].count('1.0')
if len(combined_data[key])==max_len and count <3: combined_data2[key] = combined_data[key]
elif len(combined_data[key])!=max_len: k+=1#; print key,max_len, len(combined_data[key]),combined_data[key]; kill
elif count >2: j+=1
combined_data = combined_data2
#print k,j
export_file = import_dir[1:]+'/combined.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['gene-probeset']+headers,'\t')+'\n'; data.write(title)
for key in combined_data:
values = string.join([key]+combined_data[key],'\t')+'\n'; data.write(values)
data.close()
print "exported",len(combined_data),"to",export_file
def import_annotations(filename,array_type):
from build_scripts import ExonAnalyze_module
fn=filepath(filename); annotate_db = {}; x = 0
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
                try: affygene, description, ll_id, symbol, rna_processing_annot = string.split(data,'\t')
                except ValueError: affygene, description, ll_id, symbol = string.split(data,'\t'); rna_processing_annot = '' ### was mis-assigned to an unused 'splicing_annotation' and then unconditionally reset
                if '"' in description: null,description,null = string.split(description,'"')
y = ExonAnalyze_module.GeneAnnotationData(affygene, description, symbol, ll_id, rna_processing_annot)
annotate_db[affygene] = y
else:
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
rna_processing_annot=''
try: ensembl, description, symbol, rna_processing_annot = string.split(data,'\t')
except ValueError: ensembl, description, symbol = string.split(data,'\t')
y = ExonAnalyze_module.GeneAnnotationData(ensembl, description, symbol, ensembl, rna_processing_annot)
annotate_db[ensembl] = y
return annotate_db
if __name__ == '__main__':
array_type = 'exon'
a = 'Mm'; b = 'Hs'
e = 'ASPIRE'; f = 'linearregres'; g = 'ANOVA'; h = 'splicing-index'
analysis_method = h
species = b ### edit this
if array_type != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
annotate_db = import_annotations(gene_annotation_file,array_type)
number_events_analyzed = 0
analyzing_genes = 'no'
root_dir = 'C:/Users/Nathan Salomonis/Desktop/Gladstone/1-datasets/Combined-GSE14588_RAW/junction/' ### edit this
a = root_dir+'AltResults/AlternativeOutput/Hs_Junction_d14_vs_d7.p5_average-ASPIRE-exon-inclusion-results.txt' ### edit this
b = root_dir+'AltResults/AlternativeOutput/Hs_Junction_d14_vs_d7.p5_average-splicing-index-exon-inclusion-results.txt' ### edit this
aspire_output_list = [a,b]
compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,analyzing_genes,analysis_method,array_type,root_dir)
#dataset_name = 'test.'; apt_dir = 'AltDatabase/affymetrix/APT'
#aspire_output_gene_list = ['AltResults/AlternativeOutput/Hs_Exon_CS-d40_vs_hESC-d0.p5_average-splicng_index-exon-inclusion-GENE-results.txt', 'AltResults/AlternativeOutput/Hs_Exon_Cyt-NP_vs_Cyt-ES.p5_average-splicing_index-exon-inclusion-GENE-results.txt', 'AltResults/AlternativeOutput/Hs_Exon_HUES6-NP_vs_HUES6-ES.p5_average-splicing_index-exon-inclusion-GENE-results.txt']
#runMiDAS(apt_dir,array_type,dataset_name,{},()); sys.exit()
#midas_db = importMidasOutput(dataset_name)
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/ResultsExport_module.py | ResultsExport_module.py |
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
import copy,getopt
import time
import traceback
try: import export
except Exception: pass
try: import unique
except Exception: pass
import Bio; from Bio.Seq import Seq
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def parseFASTQFile(fn):
count=0
spacer='TGGT'
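    ### The 'TGGT' spacer is expected 14 nt into read 2 when the viral barcode is
    ### in the forward orientation; otherwise the reverse complement is tried below.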
global_count=0
read2_viral_barcode={}
read1_cellular_barcode={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
global_count+=1
if count == 0:
read_id = string.split(data,' ')[0][1:]
count+=1
elif count == 1:
sequence = data
count+=1
else:
count+=1
if count == 4:
count = 0
if 'R2' in fn:
if spacer in sequence:
if sequence.index(spacer) == 14:
viral_barcode = sequence[:48]
read2_viral_barcode[read_id]=viral_barcode
else: ### Reverse complement
sequence = Seq(sequence)
sequence=str(sequence.reverse_complement())
if spacer in sequence:
if sequence.index(spacer) == 14:
viral_barcode = sequence[:48]
read2_viral_barcode[read_id]=viral_barcode
if 'R1' in fn:
if 'TTTTT' in sequence:
cell_barcode = sequence[:16]
read1_cellular_barcode[read_id]=cell_barcode
elif 'AAAAA' in sequence: ### Reverse complement
sequence = Seq(sequence)
cell_barcode=str(sequence.reverse_complement())[:16]
read1_cellular_barcode[read_id]=cell_barcode
if 'R2' in fn:
return read2_viral_barcode
else:
return read1_cellular_barcode
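### A minimal standalone sketch of the 4-line FASTQ record cycle used above:
### line 1 holds '@read_id ...', line 2 the sequence, lines 3-4 ('+' and quality
### scores) are skipped. The input below is a hypothetical in-memory record list.
def _example_fastq_ids(lines):
    ids = []
    for i, line in enumerate(lines):
        if i % 4 == 0: ### header line of each record
            ids.append(string.split(line,' ')[0][1:]) ### strip '@', drop the description
    return ids
#_example_fastq_ids(['@r1 desc','ACGT','+','FFFF']) -> ['r1']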
def outputPairs(fastq_dir,read1_cellular_barcode,read2_viral_barcode):
outdir = fastq_dir+'.viral_barcodes.txt'
o = open (outdir,"w")
unique_pairs={}
for uid in read2_viral_barcode:
if uid in read1_cellular_barcode:
cellular = read1_cellular_barcode[uid]
viral = read2_viral_barcode[uid]
if (viral,cellular) not in unique_pairs:
o.write(viral+'\t'+cellular+'\n')
unique_pairs[(viral,cellular)]=[]
o.close()
if __name__ == '__main__':
################ Comand-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a SAM file as input in the command-line"
print "Example: python BAMtoJunctionBED.py --i /Users/me/sample1.fastq"
sys.exit()
else:
Species = None
options, remainder = getopt.getopt(sys.argv[1:],'', ['i='])
for opt, arg in options:
            if opt == '--i': fastq_dir=arg ### full path of a FASTQ file (R1 or R2; the mate file is inferred below)
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if 'R1' in fastq_dir:
r1 = fastq_dir
r2 = string.replace(fastq_dir,'R1','R2')
else:
r1 = string.replace(fastq_dir,'R2','R1')
r2= fastq_dir
read2_viral_barcode = parseFASTQFile(r2)
read1_cellular_barcode = parseFASTQFile(r1)
outputPairs(fastq_dir,read1_cellular_barcode,read2_viral_barcode)
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/FASTQtoBarcode.py | FASTQtoBarcode.py |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import UI
import export; reload(export)
import time
import shutil
import traceback
def filepath(filename):
fn = unique.filepath(filename)
return fn
def osfilepath(filename):
fn = filepath(filename)
fn = string.replace(fn,'\\','/')
return fn
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def annotateMetaProbesetGenes(summary_exp_file, expression_file, metaprobeset_file, species):
metaprobeset_cv_file = string.replace(metaprobeset_file,species+'_',species+'_Conversion_')
metaprobeset_cv_file = string.replace(metaprobeset_cv_file,'.mps','.txt')
fn=filepath(metaprobeset_cv_file); uid_db={}
for line in open(fn,'rU').xreadlines():
data = UI.cleanUpLine(line)
uid,ens_gene = string.split(data,'\t')
uid_db[uid] = ens_gene
export_data = export.ExportFile(expression_file)
fn=filepath(summary_exp_file); x=0
for line in open(fn,'rU').xreadlines():
if line[0] == '#': null=[]
elif x == 0: export_data.write(line); x+=1
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
uid = t[0]; ens_gene = uid_db[uid]
export_data.write(string.join([ens_gene]+t[1:],'\t')+'\n')
export_data.close()
def reformatResidualFile(residual_exp_file,residual_destination_file):
### Re-write the residuals file so it has a single combined unique ID (arbitrary gene ID + probe ID)
print 'Re-formatting and moving the calculated residuals file...'
export_data = export.ExportFile(residual_destination_file)
fn=filepath(residual_exp_file); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0 and data[0]=='#': null=[]
elif x == 0:
x+=1; t = string.split(data,'\t')
export_data.write(string.join(['UID']+t[5:],'\t')+'\n')
else:
t = string.split(data,'\t')
            uid = t[0]+'-'+t[2] ### arbitrary numeric gene ID + probe ID
export_data.write(string.join([uid]+t[5:],'\t')+'\n')
export_data.close()
os.remove(residual_exp_file)
def verifyFileLength(filename):
count = 0
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
            if count>9: break ### only need to know whether the file has more than a few lines
except Exception: null=[]
return count
def APTDebugger(output_dir):
fatal_error = ''
fn = filepath(output_dir+'/apt-probeset-summarize.log')
for line in open(fn,'rU').xreadlines():
if 'FATAL ERROR:' in line:
fatal_error = line
return fatal_error
def probesetSummarize(exp_file_location_db,analyze_metaprobesets,probeset_type,species,root):
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]
apt_dir =fl.APTLocation()
array_type=fl.ArrayType()
pgf_file=fl.InputCDFFile()
clf_file=fl.CLFFile()
bgp_file=fl.BGPFile()
xhyb_remove = fl.XHybRemoval()
cel_dir=fl.CELFileDir() + '/cel_files.txt'
expression_file = fl.ExpFile()
stats_file = fl.StatsFile()
output_dir = fl.OutputDir() + '/APT-output'
cache_dir = output_dir + '/apt-probeset-summarize-cache'
architecture = fl.Architecture() ### May over-ride the real architecture if a failure occurs
get_probe_level_results = 'yes'
if get_probe_level_results == 'yes': export_features = 'yes'
if xhyb_remove == 'yes' and (array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
if analyze_metaprobesets == 'yes':
export_features = 'true'
metaprobeset_file = filepath('AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'_'+probeset_type+'.mps')
count = verifyFileLength(metaprobeset_file)
if count<2:
from build_scripts import ExonArray
ExonArray.exportMetaProbesets(array_type,species) ### Export metaprobesets for this build
import subprocess; import platform
print 'Processor architecture set =',architecture,platform.machine()
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt':
if '32bit' in architecture: apt_file = apt_dir + '/PC/32bit/apt-probeset-summarize'; plat = 'Windows'
elif '64bit' in architecture: apt_file = apt_dir + '/PC/64bit/apt-probeset-summarize'; plat = 'Windows'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-probeset-summarize'; plat = 'MacOSX'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'; plat = 'linux32bit'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'; plat = 'linux64bit'
apt_file = filepath(apt_file)
apt_extract_file = string.replace(apt_file,'probeset-summarize','cel-extract')
        #print 'AltAnalyze has chosen APT for',plat
print "Beginning probeset summarization of input CEL files with Affymetrix Power Tools (APT)..."
if 'cdf' in pgf_file or 'CDF' in pgf_file:
if xhyb_remove == 'yes' and array_type == 'AltMouse':
kill_list_dir = osfilepath('AltDatabase/'+species+'/AltMouse/'+species+'_probes_to_remove.txt')
else: kill_list_dir = osfilepath('AltDatabase/affymetrix/APT/probes_to_remove.txt')
try:
                ### Below code attempts to calculate probe-level summaries and absent/present p-values
                ### for 3' arrays (may fail for arrays with missing mismatch probes - AltMouse)
cdf_file = pgf_file; algorithm = 'rma'
retcode = subprocess.call([
apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir,
"--cel-files", cel_dir, "-a", "pm-mm,mas5-detect.calls=1.pairs=1"])
try:
extract_retcode = subprocess.call([
apt_extract_file, "-d", cdf_file, "--pm-with-mm-only", "-o", output_dir+'/probe.summary.txt',
"--cel-files", cel_dir, "-a"]) ### "quant-norm,pm-gcbg", "--report-background" -requires a BGP file
except Exception,e:
#print traceback.format_exc()
                    retcode = False ### On some systems a file-not-found error occurs even when the analysis completes correctly
if retcode: status = 'failed'
else:
status = 'run'
summary_exp_file = output_dir+'/'+algorithm+'.summary.txt'
export.customFileCopy(summary_exp_file, expression_file) ### Removes the # containing lines
#shutil.copyfile(summary_exp_file, expression_file)
os.remove(summary_exp_file)
summary_stats_file = output_dir+'/pm-mm.mas5-detect.summary.txt'
try: shutil.copyfile(summary_stats_file, stats_file)
except Exception: None ### Occurs if dabg export failed
os.remove(summary_stats_file)
except Exception:
#print traceback.format_exc()
try:
cdf_file = pgf_file; algorithm = 'rma'; pval = 'dabg'
retcode = subprocess.call([
apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir, "--cel-files", cel_dir]) # "-a", pval,
if retcode: status = 'failed'
else:
status = 'run'
summary_exp_file = output_dir+'/'+algorithm+'.summary.txt'
export.customFileCopy(summary_exp_file, expression_file) ### Removes the # containing lines
#shutil.copyfile(summary_exp_file, expression_file)
os.remove(summary_exp_file)
except NameError:
status = 'failed'
#print traceback.format_exc()
else:
if xhyb_remove == 'yes':
kill_list_dir = osfilepath('AltDatabase/'+species+'/exon/'+species+'_probes_to_remove.txt')
else: kill_list_dir = osfilepath('AltDatabase/affymetrix/APT/probes_to_remove.txt')
if 'Glue' in pgf_file:
kill_list_dir = string.replace(pgf_file,'pgf','kil') ### Needed to run DABG without crashing
#psr_dir = string.replace(pgf_file,'pgf','PSR.ps') ### used with -s
try:
algorithm = 'rma-sketch'; pval = 'dabg'
if analyze_metaprobesets != 'yes':
retcode = subprocess.call([
apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir,
"-a", algorithm, "-a", pval, "-o", output_dir, "--cel-files", cel_dir]) # "--chip-type", "hjay", "--chip-type", "HJAY" http://www.openbioinformatics.org/penncnv/penncnv_tutorial_affy_gw6.html
if retcode:
summary_exp_file = output_dir+'/'+pval+'.summary.txt'
try: os.remove(summary_exp_file)
except Exception: null=[] ### Occurs if dabg export failed
fatal_error = APTDebugger(output_dir)
if len(fatal_error)>0:
print fatal_error
print 'Skipping DABG p-value calculation to resolve (Bad library files -> contact Affymetrix support)'
retcode = subprocess.call([
apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir,
"-a", algorithm, "-o", output_dir, "--cel-files", cel_dir]) ### Exclude DABG p-value - known issue for Glue junction array
                        else: bad_exit ### 'bad_exit' is undefined: intentionally raises NameError to trigger the outer except
else:
retcode = subprocess.call([
apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir, "-m", metaprobeset_file,
"-a", algorithm, "-a", pval, "-o", output_dir, "--cel-files", cel_dir, "--feat-details", export_features])
if retcode:
summary_exp_file = output_dir+'/'+pval+'.summary.txt'
try: os.remove(summary_exp_file)
except Exception: null=[] ### Occurs if dabg export failed
fatal_error = APTDebugger(output_dir)
if len(fatal_error)>0:
print fatal_error
print 'Skipping DABG p-value calculation to resolve (Bad library files -> contact Affymetrix support)'
retcode = subprocess.call([
apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir, "-m", metaprobeset_file,
"-a", algorithm, "-o", output_dir, "--cel-files", cel_dir, "--feat-details", export_features]) ### Exclude DABG p-value - known issue for Glue junction array
                        else: bad_exit ### intentional NameError (see above)
if retcode: status = 'failed'
else:
status = 'run'
summary_exp_file = output_dir+'/'+algorithm+'.summary.txt'
#if analyze_metaprobesets == 'yes': annotateMetaProbesetGenes(summary_exp_file, expression_file, metaprobeset_file, species)
export.customFileCopy(summary_exp_file, expression_file) ### Removes the # containing lines
#shutil.copyfile(summary_exp_file, expression_file)
os.remove(summary_exp_file)
summary_exp_file = output_dir+'/'+pval+'.summary.txt'
#if analyze_metaprobesets == 'yes': annotateMetaProbesetGenes(summary_exp_file, stats_file, metaprobeset_file, species)
try:
shutil.copyfile(summary_exp_file, stats_file)
os.remove(summary_exp_file)
except Exception:
print traceback.format_exc()
null=[] ### Occurs if dabg export failed
if analyze_metaprobesets == 'yes':
residual_destination_file = string.replace(expression_file,'exp.','residuals.')
residual_exp_file = output_dir+'/'+algorithm+'.residuals.txt'
#shutil.copyfile(residual_exp_file, residual_destination_file);os.remove(residual_exp_file)
reformatResidualFile(residual_exp_file,residual_destination_file)
residual_dabg_file = output_dir+'/dabg.residuals.txt'; os.remove(residual_dabg_file)
except NameError:
status = 'failed'
#print traceback.format_exc()
cache_delete_status = export.deleteFolder(cache_dir)
if status == 'failed':
if architecture == '64bit' and platform.architecture()[0] == '64bit' and (os.name == 'nt' or 'linux' in sys.platform):
print 'Warning! 64bit version of APT encountered an error, trying 32bit.'
### If the above doesn't work, try 32bit architecture instead of 64bit (assuming the problem is related to known transient 64bit build issues)
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]; fl.setArchitecture('32bit')
probesetSummarize(exp_file_location_db,analyze_metaprobesets,probeset_type,species,root)
else:
print_out = 'apt-probeset-summarize failed. See log and report file in the output folder under "ExpressionInput/APT-output" for more details.'
try:
WarningWindow(print_out,'Exit')
root.destroy()
except Exception:
print print_out; force_exit
else:
print 'CEL files successfully processed. See log and report file in the output folder under "ExpressionInput/APT-output" for more details.'
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/APT.py | APT.py |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
from stats_scripts import statistics
import os.path
import unique
import UI
import export
import time
import traceback
import RNASeq
def agilentSummarize(exp_file_location_db):
print 'Agilent array import started'
global red_channel_db
global green_channel_db
red_channel_db={}
green_channel_db={}
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]
output_dir = fl.OutputDir()
array_dir=fl.CELFileDir()
group_dir = fl.GroupsFile() ### provides the list of array_files
channel_to_extract = fl.ChannelToExtract()
expression_file = fl.ExpFile()
array_group_list = UI.importArrayGroupsSimple(group_dir,[])[0]
normalization_method = fl.NormMatrix()
arrays = map(lambda agd: agd.Array(), array_group_list) ### Pull the array names out of this list of objects
dir_list = unique.read_directory(array_dir)
count=0
for array in dir_list:
if array in arrays: ### Important since other text files may exist in that directory
count+=1
filename = array_dir+'/'+array
importAgilentExpressionValues(filename,array,channel_to_extract)
if count == 50:
print '' ### For progress printing
count = 0
if len(green_channel_db)>0:
filename = output_dir+ '/'+ 'gProcessed/gProcessed-'+dataset+'-raw.txt'
exportExpressionData(filename,green_channel_db)
if 'quantile' in normalization_method:
print '\nPerforming quantile normalization on the green channel...'
green_channel_db = RNASeq.quantileNormalizationSimple(green_channel_db)
filename = output_dir+ '/'+ 'gProcessed/gProcessed-'+dataset+'-quantile.txt'
exportExpressionData(filename,green_channel_db)
final_exp_db = green_channel_db
if len(red_channel_db)>0:
filename = output_dir+ '/'+ 'rProcessed/rProcessed-'+dataset+'-raw.txt'
exportExpressionData(filename,red_channel_db)
if 'quantile' in normalization_method:
print '\nPerforming quantile normalization on the red channel...'
red_channel_db = RNASeq.quantileNormalizationSimple(red_channel_db)
filename = output_dir+ '/'+ 'rProcessed/rProcessed-'+dataset+'-quantile.txt'
exportExpressionData(filename,red_channel_db)
final_exp_db = red_channel_db
if len(red_channel_db)>0 and len(green_channel_db)>0:
if channel_to_extract == 'green/red ratio':
final_exp_db = calculateRatios(green_channel_db,red_channel_db)
elif channel_to_extract == 'red/green ratio':
final_exp_db = calculateRatios(red_channel_db,green_channel_db)
exportExpressionData(expression_file,final_exp_db)
print 'Exported expression input file to:',expression_file
def calculateRatios(db1,db2):
ratio_db={}
for array in db1:
exp_ratios={}
exp_db = db1[array]
for probe_name in exp_db:
exp_ratios[probe_name] = str(float(exp_db[probe_name])-float(db2[array][probe_name])) ### log2 ratio
ratio_db[array]=exp_ratios
return ratio_db
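### Note: both channels are stored log2-transformed on import (log2(signal+1),
### see importAgilentExpressionValues below), so the ratio of the stored values
### is a subtraction: log2(a) - log2(b) = log2(a/b). E.g. (hypothetical values)
### log2(8) - log2(2) = 2.0, i.e. a 4-fold ratio.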
def importAgilentExpressionValues(filename,array,channel_to_extract):
""" Imports Agilent Feature Extraction files for one or more channels """
print '.',
red_expr_db={}
green_expr_db={}
parse=False
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines():
data = UI.cleanUpLine(line)
if parse==False:
if 'ProbeName' in data:
headers = string.split(data,'\t')
pn = headers.index('ProbeName')
try: gc = headers.index('gProcessedSignal')
except Exception: pass
try: rc = headers.index('rProcessedSignal')
except Exception: pass
parse = True
else:
t = string.split(data,'\t')
probe_name = t[pn]
try: green_channel = math.log(float(t[gc])+1,2) #min is 0
except Exception: pass
try: red_channel = math.log(float(t[rc])+1,2) #min is 0
except Exception: pass
if 'red' in channel_to_extract:
red_expr_db[probe_name] = red_channel
if 'green' in channel_to_extract:
green_expr_db[probe_name] = green_channel
if 'red' in channel_to_extract:
red_channel_db[array] = red_expr_db
if 'green' in channel_to_extract:
green_channel_db[array] = green_expr_db
def exportExpressionData(filename,sample_db):
export_text = export.ExportFile(filename)
all_genes_db = {}
sample_list=[]
for sample in sample_db:
sample_list.append(sample)
gene_db = sample_db[sample]
for geneid in gene_db:
all_genes_db[geneid]=[]
sample_list.sort() ### Organize these alphabetically rather than randomly
column_header = string.join(['ProbeName']+sample_list,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
for geneid in all_genes_db:
values=[]
for sample in sample_list:
try: values.append(sample_db[sample][geneid]) ### protein_expression
except Exception: values.append(0)
export_text.write(string.join([geneid]+map(str, values),'\t')+'\n')
export_text.close()
if __name__ == '__main__':
None
| AltAnalyze | /AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/ProcessAgilentArrays.py | ProcessAgilentArrays.py |
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import unique
#Annotate PSI file's splicing events using alternate_junction and alternate_junction_de-novo
def verifyFile(filename):
status = False
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines(): status = True;break
except Exception: status = False
return status
class SplicingEventAnnotation:
def __init__(self, event, critical_exon):
self.event = event; self.critical_exon = critical_exon
def Event(self): return self.event
def CriticalExon(self): return self.critical_exon
def setJunctions(self,junctions): self.junctions = junctions
def Junctions(self): return self.junctions
def ExonOnly(self):
try: exon = string.split(self.critical_exon,':')[1]
except Exception: exon = self.critical_exon
return exon
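### Note on notation: critical_exon values take the form 'GENE:EXON' (e.g., a
### hypothetical 'ENSG00000000003:E2.1'); ExonOnly() returns just the exon part.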
def importPSIevents(PSIpath,species):
header=True
psievents={}
psijunctions={}
    # Create a dictionary keyed by (minor_isoform, major_isoform) for each PSI event; the value holds the annotation assigned later in the code
for line in open(PSIpath,'rU').xreadlines():
line = line.rstrip('\n')
#line=string.replace(line,"_",".")
values=string.split(line,'\t')
if header:
aI = values.index('AltExons')
header=False
continue
primary_junction = values[2]
secondary_junction = values[3]
critical_exon = values[aI]
se = SplicingEventAnnotation(None,critical_exon)
se.setJunctions([primary_junction,secondary_junction])
psievents[primary_junction,secondary_junction] = se
psijunctions[(primary_junction,)] = se ### format for importing protein annotations
psijunctions[(secondary_junction,)] = se
print"PSI events imported..."
return psievents,psijunctions
def importEventAnnotations(resultsDir,species,psievents,annotationType=None):
# Parse to the de-novo junction file and update the value of the psi events in the dictionary
header=True
if annotationType == 'de novo':
junction_file = string.split(resultsDir,'AltResults')[0]+'AltDatabase/ensembl/'+species+'/'+species+'_alternative_junctions_de-novo.txt'
else:
junction_file = 'AltDatabase/ensembl/'+species+'/'+species+'_alternative_junctions.txt'
count=0
fn = unique.filepath(junction_file)
initial_events={}
initial_critical_exons={}
status = verifyFile(fn)
if status:
for line in open(fn,'rU').xreadlines():
line = line.rstrip('\n')
if header:
header=False
continue
gene, critical_exon, j1, j2, event = string.split(line,'\t')
critical_exon = gene+':'+critical_exon
### Fix this notation
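            ### e.g. a hypothetical 'E1.2.12345-E3.1.67890' is rewritten to
            ### 'E1.2_12345-E3.1_67890' so the junction IDs match those parsed from the PSI file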
if len(j1)>17 and '_' not in j1:
j1a,j1b = string.split(j1,'-')
if len(j1a)>8:
a,b,c = string.split(j1a,'.')
j1a = a+'.'+b+'_'+c
if len(j1b)>8:
a,b,c = string.split(j1b,'.')
j1b = a+'.'+b+'_'+c
j1 = j1a+'-'+j1b
if len(j2)>17 and '_' not in j2:
j2a,j2b = string.split(j2,'-')
if len(j2a)>8:
a,b,c = string.split(j2a,'.')
j2a = a+'.'+b+'_'+c
if len(j2b)>8:
a,b,c = string.split(j2b,'.')
j2b = a+'.'+b+'_'+c
j2 = j2a+'-'+j2b
j1=gene+':'+j1
j2=gene+':'+j2
if ((j1,j2) in psievents) or ((j2,j1) in psievents):
try: initial_events[j1,j2].append(event)
except Exception: initial_events[j1,j2] = [event]
try: initial_critical_exons[j1,j2].append(critical_exon)
except Exception: initial_critical_exons[j1,j2] = [critical_exon]
for junctions in initial_events:
events = unique.unique(initial_events[junctions])
events.sort()
events = string.join(events,'|')
critical_exons = unique.unique(initial_critical_exons[junctions])
critical_exons.sort()
critical_exons = string.join(critical_exons,'|')
se = SplicingEventAnnotation(events,critical_exons)
j1,j2 = junctions
psievents[j1,j2] = se
psievents[j2,j1] = se
count+=1
print count, "junction annotations imported..."
return psievents
def importDatabaseEventAnnotations(species,platform):
terminal_exons={}
header=True
count=0
fn = 'AltDatabase/'+species+'/'+platform+'/'+species+'_Ensembl_exons.txt'
fn = unique.filepath(fn)
for line in open(fn,'rU'):
line = line.rstrip('\n')
values = string.split(line,'\t')
if header:
eI = values.index('splice_events')
header=False
continue
exon = values[0]
event = values[eI]
if 'alt-N-term' in event or 'altPromoter' in event:
if 'cassette' not in event:
terminal_exons[exon] = 'altPromoter'
count+=1
elif 'alt-C-term' in event:
if 'cassette' not in event:
terminal_exons[exon] = 'alt-C-term'
count+=1
"""
elif 'bleedingExon' in event or 'altFinish' in event:
terminal_exons[exon] = 'bleedingExon'
count+=1"""
print count, 'terminal exon annotations stored'
return terminal_exons
def inverseFeatureDirections(features):
features2=[]
for f in features:
if '+' in f:
f = string.replace(f,'+','-')
else:
f = string.replace(f,'-','+')
features2.append(f)
return features2
def formatFeatures(features):
features2=[]
for f in features:
f = string.split(f,'|')
direction = f[-1]
annotation = f[0]
f = '('+direction+')'+annotation
features2.append(f)
return features2
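### Example (hypothetical annotation string): 'Pkinase|+' is reformatted to
### '(+)Pkinase'; inverseFeatureDirections then flips it to '(-)Pkinase' for the
### reciprocal junction order.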
def importIsoformAnnotations(species,platform,psievents,annotType=None,junctionPairFeatures={},dataType='reciprocal'):
count=0
if annotType == 'domain':
if dataType == 'reciprocal':
fn = 'AltDatabase/'+species+'/'+platform+'/'+'probeset-domain-annotations-exoncomp.txt'
else:
fn = 'AltDatabase/'+species+'/'+platform+'/junction/'+'probeset-domain-annotations-exoncomp.txt'
else:
if dataType == 'reciprocal':
fn = 'AltDatabase/'+species+'/'+platform+'/'+'probeset-protein-annotations-exoncomp.txt'
else:
fn = 'AltDatabase/'+species+'/'+platform+'/junction/'+'probeset-protein-annotations-exoncomp.txt'
fn = unique.filepath(fn)
for line in open(fn,'rU'):
line = line.rstrip('\n')
values = string.split(line,'\t')
junctions = string.split(values[0],'|')
features = formatFeatures(values[1:])
antiFeatures = inverseFeatureDirections(features)
if tuple(junctions) in psievents:
try: junctionPairFeatures[tuple(junctions)].append(string.join(features,', '))
except Exception: junctionPairFeatures[tuple(junctions)] = [string.join(features,', ')]
if dataType == 'reciprocal':
junctions.reverse()
if tuple(junctions) in psievents:
try: junctionPairFeatures[tuple(junctions)].append(string.join(antiFeatures,', '))
except Exception: junctionPairFeatures[tuple(junctions)] = [string.join(antiFeatures,', ')]
count+=1
print count, 'protein predictions added'
return junctionPairFeatures
def DetermineIntronRetention(coordinates):
intronRetention = False
coordinates1,coordinates2 = string.split(coordinates,'|')
coordinates1 = string.split(coordinates1,':')[1]
coordinate1a, coordinate1b = string.split(coordinates1,'-')
coordinate1_diff = abs(float(coordinate1a)-float(coordinate1b))
coordinates2 = string.split(coordinates2,':')[1]
coordinate2a, coordinate2b = string.split(coordinates2,'-')
coordinate2_diff = abs(float(coordinate2a)-float(coordinate2b))
    if coordinate1_diff==1 or coordinate2_diff==1: ### a 1-nt span marks an exon-intron boundary junction
intronRetention = True
return intronRetention
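### Worked example (hypothetical coordinates): 'chr1:100-101|chr1:100-250' yields
### span differences of 1 and 150; the 1-nt span flags the event as intron retention.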
class SplicingAnnotations(object):
def __init__(self, symbol, description,junc1,junc2,altExons,proteinPredictions,eventAnnotation,coordinates):
self.symbol = symbol
self.description = description
self.junc1 = junc1
self.junc2 = junc2
self.altExons = altExons
self.proteinPredictions = proteinPredictions
self.eventAnnotation = eventAnnotation
self.coordinates = coordinates
def Symbol(self): return self.symbol
def Description(self): return self.description
def Junc1(self): return self.junc1
def Junc2(self): return self.junc2
def AltExons(self): return self.altExons
def ProteinPredictions(self): return self.proteinPredictions
def EventAnnotation(self): return self.eventAnnotation
def Coordinates(self): return self.coordinates
def importPSIAnnotations(PSIpath):
header=True
count=0
annotations={}
for line in open(PSIpath,'rU').xreadlines():
line = line.rstrip('\n')
values = string.split(line,'\t')
if header:
sI = values.index('Symbol')
dI = values.index('Description')
eI = values.index('Examined-Junction')
bI = values.index('Background-Major-Junction')
aI = values.index('AltExons')
pI = values.index('ProteinPredictions')
vI = values.index('EventAnnotation')
cI = values.index('Coordinates')
header=False
else:
symbol = values[sI]
description = values[dI]
junc1 = values[eI]
junc2 = values[bI]
altExons = values[aI]
proteinPredictions = values[pI]
eventAnnotation = values[vI]
coordinates = values[cI]
key = symbol+':'+junc1+"|"+junc2
sa = SplicingAnnotations(symbol, description,junc1,junc2,altExons,proteinPredictions,eventAnnotation,coordinates)
annotations[key] = sa
return annotations
def updatePSIAnnotations(PSIpath, species, psievents, terminal_exons, junctionPairFeatures, junctionFeatures):
### Write the updated PSI file with annotations to a new file, annotating events not already covered by the junction files
#print len(psievents)
header=True
export_path = PSIpath[:-4]+'_EventAnnotation.txt'
export=open(export_path,'w')
count=0
for line in open(PSIpath,'rU').xreadlines():
line = line.rstrip('\n')
values = string.split(line,'\t')
if header:
try: fI = values.index('feature')
except: fI = values.index('EventAnnotation')
aI = values.index('AltExons')
try: pI = values.index('PME')
except: pI = values.index('ProteinPredictions')
cI = values.index('Coordinates')
values[fI] = 'EventAnnotation'
values[pI] = 'ProteinPredictions'
export.write(string.join(values,'\t')+'\n')
header=False
continue
psiJunction=string.split(line,'\t')
psiJunction_primary = psiJunction[2]
psiJunction_secondary = psiJunction[3]
key = psiJunction_primary,psiJunction_secondary
#psiJunction_primary=string.replace(psiJunction[2],"_",".")
#psiJunction_secondary=string.replace(psiJunction[3],"_",".")
event = psievents[psiJunction_primary,psiJunction_secondary].Event()
critical_exon = psievents[key].CriticalExon()
if key in junctionPairFeatures:
proteinAnnotation = string.join(junctionPairFeatures[key],'|')
elif (psiJunction_primary,) in junctionFeatures:
proteinAnnotation = string.join(junctionFeatures[(psiJunction_primary,)],'|') ### single-junction features are stored in junctionFeatures
#elif (psiJunction_secondary,) in junctionFeatures:
#proteinAnnotation = string.join(junctionFeatures[(psiJunction_secondary,)],'|')
else:
proteinAnnotation=''
values[pI] = proteinAnnotation
intronRetention = DetermineIntronRetention(values[cI])
if critical_exon in terminal_exons:
event = terminal_exons[critical_exon]
if intronRetention:
event = 'intron-retention'
try:
if event==None:
primary_exons=string.split(psiJunction[2],":")
secondary_exons=string.split(psiJunction[3],":")
if len(primary_exons)>2 or len(secondary_exons)>2:
values[fI] = 'trans-splicing'
export.write(string.join(values,'\t')+'\n')
continue
else:
primary_exonspos=string.split(primary_exons[1],"-")
secondary_exonspos=string.split(secondary_exons[1],"-")
if ('U0' in primary_exons[1]) or ('U0' in secondary_exons[1]):
if ('U0.' in primary_exonspos[0]) or ('U0.' in secondary_exonspos[0]):
values[fI] = 'altPromoter'
export.write(string.join(values,'\t')+'\n')
continue
else:
values[fI] = ''
export.write(string.join(values,'\t')+'\n')
continue
try: event = predictSplicingEventTypes(psiJunction_primary,psiJunction_secondary)
except Exception:
event = ''
values[fI] = event
export.write(string.join(values,'\t')+'\n')
continue
else:
values[fI] = event
values[aI] = critical_exon
count+=1
export.write(string.join(values,'\t')+'\n')
except Exception:
values[fI] = ''
export.write(string.join(values,'\t')+'\n')
#print count
return export_path
def predictSplicingEventTypes(junction1,junction2):
if 'I' not in junction1 and '_' in junction1:
junction1 = string.replace(junction1,'_','') ### allows this to be seen as an alternative splice site
if 'I' not in junction2 and '_' in junction2:
junction2 = string.replace(junction2,'_','') ### allows this to be seen as an alternative splice site
if 'I' in junction1 or 'I' in junction2:
raise Exception('intron-containing junction') ### intentional; intron retention is annotated separately and the caller catches this
j1a,j1b = string.split(junction1,'-')
j2a,j2b = string.split(junction2,'-')
j1a = string.split(j1a,':')[1][1:]
j2a = string.split(j2a,':')[1][1:]
j1a,r1a = string.split(j1a,'.')
j1b,r1b = string.split(j1b[1:],'.')
j2a,r2a = string.split(j2a,'.')
j2b,r2b = string.split(j2b[1:],'.')
### convert to integers
j1a,r1a,j1b,r1b,j2a,r2a,j2b,r2b = map(lambda x: int(float(x)),[j1a,r1a,j1b,r1b,j2a,r2a,j2b,r2b])
splice_event=[]
if j1a == j2a and j1b==j2b: ### alt-splice site
if r1a == r2a: splice_event.append("alt-3'")
else: splice_event.append("alt-5'")
elif j1a == j2a: splice_event.append("cassette-exon")
elif j1b==j2b:
if 'E1.' in junction1: splice_event.append("altPromoter")
else: splice_event.append("cassette-exon")
elif 'E1.' in junction1 or 'E1.' in junction2:
splice_event.append("altPromoter")
else:
splice_event.append("cassette-exon")
splice_event = unique.unique(splice_event)
splice_event.sort()
splice_event = string.join(splice_event,'|')
return splice_event
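### Illustrative sketch (not part of the original module): junctions are expected as
### 'gene:E<block>.<region>-E<block>.<region>'. A shared upstream block with differing
### downstream blocks is called a cassette exon; identical blocks with differing region
### numbers are alternative 5'/3' splice sites. The gene ID below is hypothetical.
def _demo_predictSplicingEventTypes():
print predictSplicingEventTypes('ENSG00000000001:E2.1-E4.1','ENSG00000000001:E2.1-E3.1') ### cassette-exon
print predictSplicingEventTypes('ENSG00000000001:E2.1-E3.1','ENSG00000000001:E2.2-E3.1') ### alt-5'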
def parse_junctionfiles(resultsDir,species,platform):
""" Add splicing annotations for PSI results """
if 'top_alt' not in resultsDir:
PSIpath = resultsDir+'/'+species+'_'+platform+'_top_alt_junctions-PSI.txt'
else:
PSIpath = resultsDir
### Get all splice-junction pairs
psievents,psijunctions = importPSIevents(PSIpath,species)
### Get domain/protein predictions
junctionPairFeatures = importIsoformAnnotations(species,platform,psievents)
junctionPairFeatures = importIsoformAnnotations(species,platform,psievents,annotType='domain',junctionPairFeatures=junctionPairFeatures)
junctionFeatures = importIsoformAnnotations(species,platform,psijunctions,dataType='junction')
junctionFeatures = importIsoformAnnotations(species,platform,psijunctions,annotType='domain',junctionPairFeatures=junctionFeatures,dataType='junction')
### Get all de novo junction annotations (includes novel junctions)
psievents = importEventAnnotations(resultsDir,species,psievents,annotationType='de novo')
### Get all known junction annotations
psievents = importEventAnnotations(resultsDir,species,psievents)
### Import the annotations that provide alternative terminal events
terminal_exons = importDatabaseEventAnnotations(species,platform)
### Update our PSI annotation file with these improved predictions
export_path = updatePSIAnnotations(PSIpath, species, psievents, terminal_exons, junctionPairFeatures, junctionFeatures)
return export_path
if __name__ == '__main__':
import multiprocessing as mlp
import getopt
#bam_dir = "H9.102.2.6.bam"
#outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed'
platform = 'RNASeq'
species = 'Hs'
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Please designate a results directory containing the PSI file as input in the command-line (--i)"
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species=','platform=','array='])
for opt, arg in options:
if opt == '--i': resultsDir=arg
elif opt == '--species': species=arg
elif opt == '--platform': platform=arg
elif opt == '--array': platform=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
parse_junctionfiles(resultsDir,species,platform)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/AugmentEventAnnotations.py
|
AugmentEventAnnotations.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This script can be run on its own to extract a single BAM file at a time or
indirectly by multiBAMtoBED.py to extract junction.bed files (Tophat format)
from many BAM files in a single directory at once. Currently uses the Tophat
predicted Strand notation opt('XS') for each read. This can be substituted with
strand notations from other aligners (check with the software authors)."""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
import copy,getopt
import time
import traceback
try: import export
except Exception: pass
try: import unique
except Exception: pass
try:
import TabProxies
import ctabix
import csamtools
import cvcf
except Exception:
try:
if os.name != 'posix': print traceback.format_exc()
except Exception: pass
def getSpliceSites(cigarList,X):
cumulative=0
up_to_intron_dist=0 ### initialized in case the read contains no intron (N) operations
coordinates=[]
for (code,seqlen) in cigarList:
if code == 0: ### M: aligned block
cumulative+=seqlen
if code == 3: ### N: skipped region (intron)
#if strand == '-':
five_prime_ss = str(X+cumulative)
cumulative+=seqlen ### add the intron length
three_prime_ss = str(X+cumulative+1) ### 3' exon start (prior exon splice-site + intron length)
coordinates.append([five_prime_ss,three_prime_ss])
up_to_intron_dist = cumulative
return coordinates, up_to_intron_dist
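### Illustrative sketch (not part of the original module): pysam encodes CIGAR strings as
### (operation,length) tuples where 0 is an aligned block (M) and 3 is a skipped intron (N).
### For a 50M100N50M read starting at position 1000, one junction is reported.
def _demo_getSpliceSites():
coordinates, up_to_intron_dist = getSpliceSites([(0,50),(3,100),(0,50)],1000)
print coordinates ### [['1050', '1151']]
print up_to_intron_dist ### 150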
def writeJunctionBedFile(junction_db,jid,o):
strandStatus = True
for (chr,jc,tophat_strand) in junction_db:
if tophat_strand==None:
strandStatus = False
break
if strandStatus== False: ### If no strand information in the bam file filter and add known strand data
junction_db2={}
for (chr,jc,tophat_strand) in junction_db:
original_chr = chr
if 'chr' not in chr:
chr = 'chr'+chr
for j in jc:
try:
strand = splicesite_db[chr,j]
junction_db2[(original_chr,jc,strand)]=junction_db[(original_chr,jc,tophat_strand)]
except Exception: pass
junction_db = junction_db2
for (chr,jc,tophat_strand) in junction_db:
x_ls=[]; y_ls=[]; dist_ls=[]
read_count = str(len(junction_db[(chr,jc,tophat_strand)]))
for (X,Y,dist) in junction_db[(chr,jc,tophat_strand)]:
x_ls.append(X); y_ls.append(Y); dist_ls.append(dist)
outlier_start = min(x_ls); outlier_end = max(y_ls); dist = str(max(dist_ls))
exon_lengths = str(int(jc[0])-outlier_start)+','+str(outlier_end-int(jc[1])+1)
junction_id = 'JUNC'+str(jid)+':'+jc[0]+'-'+jc[1] ### store the unique junction coordinates in the name
output_list = [chr,str(outlier_start),str(outlier_end),junction_id,read_count,tophat_strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
o.write(string.join(output_list,'\t')+'\n')
def writeIsoformFile(isoform_junctions,o):
### Debugging stub: de-duplicates each junction list, prints '+' strand coordinates, then exits
for coord in isoform_junctions:
isoform_junctions[coord] = unique.unique(isoform_junctions[coord])
if '+' in coord:
print coord, isoform_junctions[coord]
if '+' in coord:
sys.exit()
def verifyFileLength(filename):
count = 0
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>9: break
except Exception: null=[]
return count
def retreiveAllKnownSpliceSites(returnExonRetention=False,DesignatedSpecies=None,path=None):
### Uses a priori strand information when none present
import export, unique
chromosomes_found={}
try: parent_dir = export.findParentDir(bam_file)
except Exception: parent_dir = export.findParentDir(path)
species = None
for file in os.listdir(parent_dir):
if 'AltAnalyze_report' in file and '.log' in file:
log_file = unique.filepath(parent_dir+'/'+file)
log_contents = open(log_file, "rU")
species_tag = ' species: '
for line in log_contents:
line = line.rstrip()
if species_tag in line:
species = string.split(line,species_tag)[1]
if species == None:
try: species = IndicatedSpecies
except Exception: species = DesignatedSpecies
splicesite_db={}
gene_coord_db={}
try:
if ExonReference==None:
exon_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
length = verifyFileLength(exon_dir)
except Exception:
#print traceback.format_exc();sys.exit()
length = 0
if length==0:
exon_dir = ExonReference
refExonCoordinateFile = unique.filepath(exon_dir)
firstLine=True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
spliceEvent = t[-2]
#start = int(start); stop = int(stop)
#geneID = string.split(exon,':')[0]
try:
gene_coord_db[geneID,chr].append(int(start))
gene_coord_db[geneID,chr].append(int(stop))
except Exception:
gene_coord_db[geneID,chr] = [int(start)]
gene_coord_db[geneID,chr].append(int(stop))
if returnExonRetention:
if 'exclusion' in spliceEvent:
splicesite_db[geneID+':'+exon]=[]
else:
splicesite_db[chr,start]=strand
splicesite_db[chr,stop]=strand
if len(chr)<5 or ('GL' not in chr and 'JH' not in chr and 'MG' not in chr):
chromosomes_found[string.replace(chr,'chr','')] = []
for i in gene_coord_db:
gene_coord_db[i].sort()
gene_coord_db[i] = [gene_coord_db[i][0],gene_coord_db[i][-1]]
return splicesite_db,chromosomes_found,gene_coord_db
def parseJunctionEntries(bam_dir,multi=False, Species=None, ReferenceDir=None):
global bam_file
global splicesite_db
global IndicatedSpecies
global ExonReference
IndicatedSpecies = Species
ExonReference = ReferenceDir
bam_file = bam_dir
splicesite_db={}; chromosomes_found={}
start = time.time()
try: import collections; junction_db=collections.OrderedDict()
except Exception:
try: import ordereddict; junction_db = ordereddict.OrderedDict()
except Exception: junction_db={}
original_junction_db = copy.deepcopy(junction_db)
bamf = pysam.Samfile(bam_dir, "rb" )
### Check whether an indexed .bai exists for this BAM
try:
for entry in bamf.fetch():
codes = map(lambda x: x[0],entry.cigar)
break
except Exception:
### Make BAM Index
if multi == False:
print 'Building BAM index file for', bam_dir
bam_dir = str(bam_dir)
#On Windows, this indexing step will fail if the __init__ pysam file line 51 is not set to - catch_stdout = False
pysam.index(bam_dir)
bamf = pysam.Samfile(bam_dir, "rb" )
chromosome = False
barcode_pairs={}
bam_reads=0
count=0
jid = 1
prior_jc_start=0
import Bio; from Bio.Seq import Seq
l1 = None; l2=None
o = open (string.replace(bam_dir,'.bam','.export2.txt'),"w")
spacer='TGGT'
for entry in bamf.fetch():
#if entry.query_name == 'M03558:141:GW181002:1:2103:13361:6440':
if spacer in entry.seq:
if entry.seq.index(spacer) == 14:
viral_barcode = entry.seq[:48]
try:
mate = bamf.mate(entry)
mate_seq = Seq(mate.seq)
cell_barcode=str(mate_seq.reverse_complement())[:16]
if (viral_barcode,cell_barcode) not in barcode_pairs:
o.write(viral_barcode+'\t'+cell_barcode+'\n')
barcode_pairs[viral_barcode,cell_barcode]=[]
if 'ATAGCGGGAACATGTGGTCATGGTACTGACGTTGACACGTACGTCATA' == viral_barcode:
print entry.query_name, cell_barcode, mate_seq
except:
pass
#print viral_barcode, mate.seq;sys.exit()
count+=1
#if count==100: sys.exit()
bamf.close()
o.close()
if __name__ == "__main__":
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Please designate a BAM file as input in the command-line"
print "Example: python BAMtoJunctionBED.py --i /Users/me/sample1.bam"
sys.exit()
else:
Species = None
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species='])
for opt, arg in options:
if opt == '--i': bam_dir=arg ### full path of a BAM file
elif opt == '--species': Species=arg ### species for STAR analysis to get strand
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
try: parseJunctionEntries(bam_dir,Species=Species)
except ZeroDivisionError:
print [sys.argv[1:]],'error'; error
""" Benchmarking notes: On a 2017 MacBook Pro with 16GB of RAM and a local 7GB BAM file (solid drive), 9 minutes (526s) to complete writing a junction.bed.
To simply search through the file without looking at the CIGAR, the script takes close to 5 minutes (303s)"""
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/BAMtoBarcode.py
|
BAMtoBarcode.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import csv
import scipy.io
from scipy import sparse, stats, io
import numpy
import time
import math
from scipy import sparse, stats
import gzip
try:
import h5py
except:
print ('Missing the h5py library (hdf5 support)...')
def import10XSparseMatrix(matrices_dir,genome,dataset_name, expFile=None, log=True):
start_time = time.time()
if '.h5' in matrices_dir:
h5_filename = matrices_dir
f = h5py.File(h5_filename, 'r')
genome = None
if 'matrix' in f:
# CellRanger v3
barcodes = list(f['matrix']['barcodes'])
gene_ids = f['matrix']['features']['id']
gene_names = f['matrix']['features']['name']
mat = sparse.csc_matrix((f['matrix']['data'], f['matrix']['indices'], f['matrix']['indptr']), shape=f['matrix']['shape'])
else:
# CellRanger v2
possible_genomes = f.keys()
if len(possible_genomes) != 1:
raise Exception("{} contains multiple genomes ({}). Explicitly select one".format(h5_filename, ", ".join(possible_genomes)))
genome = possible_genomes[0]
mat = sparse.csc_matrix((f[genome]['data'], f[genome]['indices'], f[genome]['indptr']))
gene_names = f[genome]['gene_names']
barcodes = list(f[genome]['barcodes'])
gene_ids = f[genome]['genes']
else:
#matrix_dir = os.path.join(matrices_dir, genome)
matrix_dir = matrices_dir
mat = scipy.io.mmread(matrix_dir)
genes_path = string.replace(matrix_dir,'matrix.mtx','genes.tsv')
barcodes_path = string.replace(matrix_dir,'matrix.mtx','barcodes.tsv')
if os.path.isfile(genes_path)==False:
genes_path = string.replace(matrix_dir,'matrix.mtx','features.tsv')
if '.gz' in genes_path:
gene_ids = [row[0] for row in csv.reader(gzip.open(genes_path), delimiter="\t")]
gene_names = [row[1] for row in csv.reader(gzip.open(genes_path), delimiter="\t")]
barcodes = [row[0] for row in csv.reader(gzip.open(barcodes_path), delimiter="\t")]
else:
gene_ids = [row[0] for row in csv.reader(open(genes_path), delimiter="\t")]
print gene_ids[0:10]
gene_names = [row[1] for row in csv.reader(open(genes_path), delimiter="\t")]
barcodes = [row[0] for row in csv.reader(open(barcodes_path), delimiter="\t")]
#barcodes = map(lambda x: string.replace(x,'-1',''), barcodes) ### could possibly cause issues with comparative analyses
matrices_dir = os.path.abspath(os.path.join(matrices_dir, os.pardir))
### Write out raw data matrix
counts_path = matrices_dir+'/'+dataset_name+'_matrix.txt'
if expFile!=None:
if 'exp.' in expFile:
counts_path = string.replace(expFile,'exp.','counts.')
### Efficiently write the data to an external file (fastest way)
mat_array_original = mat.toarray() ### convert sparse matrix to numpy array
mat_array = numpy.ndarray.tolist(mat_array_original) ### convert to non-numpy list
i=0
updated_mat=[['UID']+barcodes]
for ls in mat_array:
updated_mat.append([gene_names[i]]+ls); i+=1
mat_array = numpy.array(mat_array)
updated_mat = numpy.array(updated_mat)
numpy.savetxt(counts_path,updated_mat,fmt='%s',delimiter='\t')
del updated_mat
print 'Raw-counts written to file:'
print counts_path
#print time.time()-start_time
### Write out CPM normalized data matrix
if expFile==None:
norm_path = matrices_dir+'/'+dataset_name+'_matrix_CPTT.txt'
else:
norm_path = expFile ### AltAnalyze designated ExpressionInput file (not counts)
print 'Normalizing gene counts to counts per ten thousand (CPTT)'
barcode_sum = numpy.sum(mat_array,axis=0) ### total counts per barcode (axis=0 sums over the gene rows)
start_time = time.time()
mat_array = mat_array.transpose()
def calculateCPTT(val,barcode_sum):
if val==0:
return '0'
else:
if log:
return math.log((10000.00*val/barcode_sum)+1.0,2) ### convert to log2 expression
else:
return 10000.00*val/barcode_sum
vfunc = numpy.vectorize(calculateCPTT)
norm_mat_array=[]
i=0
for vector in mat_array:
norm_mat_array.append(vfunc(vector,barcode_sum[i]))
i+=1
mat_array = numpy.array(norm_mat_array)
mat_array = mat_array.transpose()
mat_array = numpy.ndarray.tolist(mat_array) ### convert to non-numpy list
i=0
updated_mat=[['UID']+barcodes]
for ls in mat_array:
updated_mat.append([gene_names[i]]+ls); i+=1
updated_mat = numpy.array(updated_mat)
del mat_array
numpy.savetxt(norm_path,updated_mat,fmt='%s',delimiter='\t')
#print time.time()-start_time
print 'CPTT written to file:',
print norm_path
return norm_path
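### Illustrative sketch (not part of the original module): the core of the CPTT
### normalization above, reduced to numpy. Each barcode's counts are scaled to a
### library size of 10,000 and log2-transformed with a pseudocount of 1; assumes
### every barcode (column) has a nonzero total.
def _demo_cptt(counts):
### counts: genes x barcodes numpy array of raw counts
barcode_sum = numpy.sum(counts,axis=0) ### total counts per barcode (column)
return numpy.log2((10000.0*counts/barcode_sum)+1.0) ### log2(CPTT+1)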
if __name__ == '__main__':
import getopt
filter_rows=False
filter_file=None
genome = 'hg19'
dataset_name = '10X_filtered'
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Insufficient options provided";sys.exit()
#Filtering samples in a datasets
#python 10XProcessing.py --i /Users/test/10X/outs/filtered_gene_bc_matrices/ --g hg19 --n My10XExperiment
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','n='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--i': matrices_dir=arg
elif opt == '--g': genome=arg
elif opt == '--n': dataset_name=arg
import10XSparseMatrix(matrices_dir,genome,dataset_name)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/ChromiumProcessing.py
|
ChromiumProcessing.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains methods for reading in OBO format Gene Ontology files and building
numeric nested hierarchy paths (e.g., reconstructing the directed acyclic graph), importing
prebuilt hiearchy paths, creating nested Ontology associations from existing gene-Ontology files."""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import os.path, platform
import unique
import math
import shutil
import time
import gene_associations
import copy
################# Parse directory files
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv" or ".ontology" in entry or '.obo' in entry: dir_list2.append(entry)
return dir_list2
###### Classes ######
class GrabFiles:
def setdirectory(self,value):
self.data = value
def display(self):
print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
try: files = getDirectoryFiles(self.data,str(search_term))
except Exception:
files = [] ### directory doesn't exist
#print self.data, "doesn't exist"
return files
def getDirectoryFiles(import_dir, search_term):
matching_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = import_dir[1:]+'/'+data
if search_term in affy_data_dir: matching_files.append(affy_data_dir)
return matching_files
################# Import and Annotate Data
def eliminate_redundant_dict_values(database):
db1={}
for key in database: values = unique.unique(database[key]); values.sort(); db1[key] = values
return db1
class OntologyPath:
def __init__(self,ontology_id,ontology_term,current_level,rank,path,specific_type):
self._ontology_id = ontology_id; self._ontology_term = ontology_term; self._current_level = current_level
self._rank = rank; self._path = path; self._specific_type = specific_type
def OntologyID(self): return self._ontology_id
def OntologyIDStr(self): return self._ontology_id[3:]
def OntologyTerm(self): return self._ontology_term
def OntologyLevel(self): return self._current_level
def OntologyType(self): return self._specific_type
def Rank(self): return self._rank
def PathStr(self):
path_index = pathToString(self.PathList())
return path_index
def PathList(self): return self._path
def PathTuple(self): return tuple(self._path)
def Report(self):
output = self.OntologyID()+'|'+self.OntologyTerm()
return output
def __repr__(self): return self.Report()
def pathToString(path_list):
path_str=[]
for path_int in path_list: path_str.append(str(path_int))
path_index = string.join(path_str,'.')
return path_index
class OntologyTree:
def __init__(self,ontology_id,ontology_term,ontology_type):
self._ontology_id = ontology_id; self._ontology_term = ontology_term; self._ontology_type = ontology_type
def OntologyID(self): return self._ontology_id
def OntologyTerm(self): return self._ontology_term
def OntologyType(self): return self._ontology_type
def setOntologyType(self,ontology_type): self._ontology_type=ontology_type
def Report(self):
output = self.OntologyID()+'|'+self.OntologyTerm()
return output
def __repr__(self): return self.Report()
class OntologyTreeDetailed(OntologyTree):
###Class not currently used
def __init__(self,ontology_id,ontology_term,ontology_type,parent_ontology_id,relation):
self._ontology_id = ontology_id; self._ontology_term = ontology_term; self._ontology_type = ontology_type
self._parent_ontology_id = parent_ontology_id; self._relation = relation
def ParentOntologyID(self): return self._parent_ontology_id
def Relation(self): return self._relation
def Report(self):
output = self.OntologyID()+'|'+self.OntologyTerm()
return output
def __repr__(self): return self.Report()
###################################### UPDATED OBO CODE - BEGIN
def nestTree(parent_node,path,export_data,count_nodes):
### export_data,count_nodes are used for QC only
children = edges[parent_node]
path.append(0)
for child in children.keys():
tuple_path = tuple(path)
#count_nodes+=1
#try: temp = string.join(edges[child].keys(),'|')
#except Exception: temp = ''
#export_data.write(str(tuple_path)+'\t'+child+'\t'+temp+'\n')
p = list(path) ### Otherwise, the same path somehow gets used (alternative to copy.deepcopy())
if child in edges:
count_nodes = nestTree(child,p,export_data,count_nodes)
#if count_nodes==1000: kill
path_ontology_db[tuple_path] = child
if child not in built_ontology_paths:
built_ontology_paths[child] = [tuple_path]
elif tuple_path not in built_ontology_paths[child]:
built_ontology_paths[child].append(tuple_path)
path[-1]+=1
return count_nodes
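### Illustrative sketch (not part of the original module): nestTree walks the parent->child
### edge dictionary depth-first, recording one numeric tuple path per node visit, so a term
### reachable by several routes receives several paths. The toy ontology below is hypothetical;
### child order follows Python dictionary iteration.
def _demo_nestTree():
global edges, path_ontology_db, built_ontology_paths
edges = {'root':{'A':[],'B':[]},'A':{'C':[]}}
path_ontology_db = {(0,):'root'}; built_ontology_paths = {}
nestTree('root',[0],None,0)
print path_ontology_db ### e.g. {(0,): 'root', (0, 0): 'A', (0, 0, 0): 'C', (0, 1): 'B'}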
def importOBONew(filedir,path,specific_type,rank):
if specific_type == '': discover_root = 'yes'
else: discover_root = 'no'
global edges
#print [discover_root,specific_type,path]
fn=filepath(filedir); x=0; stored={}; edges={}; category = 'null'; all_children={}; ontology_annotations_extra={}
ontology_id=''; ontology_term=''; edge_count=0; root_node = None
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
s = string.split(data,' '); d = string.split(data,':')
if x == 0:
x=1
if x > 0:
#if s[0]=='def:': definition = d[1]
if s[0]=='id:':
try:
ontology_id = s[1]
#ontology_id=string.split(ontology_id,':')[1]
category = 'null'
except Exception: null=[]; ontology_id = ''; ontology_term = ''
if s[0]=='namespace:': category = s[1]
if s[0]=='name:':
ontology_term = d[1][1:]
if ontology_term == specific_type:
root_node = ontology_id
if category == specific_type or discover_root=='yes':
if s[0]=='is_a:': ### Note: sometimes there are multiple parents indicated for a single child
parent = s[1] ### immediate parent node
#parent=string.split(parent,':')[1]
if parent in edges: ### each child has one parent, one parent can have many children
children = edges[parent]
children[ontology_id]=[]
else: children = {ontology_id:[]}
edges[parent]=children
edge_count+=1
if discover_root=='yes': all_children[ontology_id] = []
if ontology_id not in ontology_annotations:
gt = OntologyTree(ontology_id,ontology_term,specific_type)
ontology_annotations[ontology_id] = gt
elif root_node == ontology_id: ### For example, biological process
gt = OntologyTree(ontology_id,ontology_term,specific_type)
ontology_annotations[ontology_id] = gt
elif ontology_id != '' and ontology_term != '':
gt = OntologyTree(ontology_id,ontology_term,specific_type)
ontology_annotations_extra[ontology_id] = gt
if discover_root=='yes':
### The root node should not exist as a child node
for parent in edges:
if parent not in all_children: root_node = parent
specific_type = ontology_annotations_extra[root_node].OntologyTerm()
#print 'Parent node assigned to:',specific_type
### Assign the root_node name as the Ontology-Type
for ontology_id in ontology_annotations:
ontology_annotations[ontology_id].setOntologyType(specific_type)
if root_node == None:
print 'NO ROOT NODE IDENTIFIED... SHOULD BE:', specific_type
print filedir; kill ### 'kill' is undefined; the NameError intentionally halts execution
if len(path)==0: path.append(0); path_ontology_db[tuple(path)] = root_node; return_path = list(path); #print [tuple(path)]
else: path = [path[0]+1]; path_ontology_db[tuple(path)] = root_node; return_path = list(path); #print [tuple(path)]
#export_data = export.ExportFile('OBO/test.txt')
export_data=''
nestTree(root_node,path,export_data,0)
#export_data.close()
#print 'Tree built'
for path in path_ontology_db:
path_dictionary[path]=[path]
###Build nested Path-index
path_len = len(path); i=-1
while path_len+i > 0:
parent_path = path[:i]
if parent_path in path_dictionary: path_dictionary[parent_path].append(path)
i-=1
print edge_count,'edges and',len(ontology_annotations), 'Ontology annotations imported for',specific_type
#print [[[return_path]]]
return path_ontology_db,built_ontology_paths,ontology_annotations,path_dictionary,return_path,rank
###################################### UPDATED OBO CODE - END
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def swapKeyValues(db):
swapped={}
for key in db:
values = list(db[key]) ###If the value is not a list, make a list
for value in values:
try: swapped[value].append(key)
except KeyError: swapped[value] = [key]
swapped = eliminate_redundant_dict_values(swapped)
return swapped
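### Illustrative sketch (not part of the original module): swapKeyValues inverts a
### one-to-many mapping, e.g. gene->terms becomes term->genes, with each value list
### de-duplicated and sorted (dict print order may vary).
def _demo_swapKeyValues():
print swapKeyValues({'gene1':['GO:1','GO:2'],'gene2':['GO:1']}) ### {'GO:1': ['gene1', 'gene2'], 'GO:2': ['gene1']}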
def exportCurrentOntologyBuild(path_ontology_db,ontology_annotations,ontology_type, display=False):
program_type,database_dir = unique.whatProgramIsThis(); parent_dir = ''
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite/'
new_file = parent_dir+'OBO/builds/built_'+ontology_type+'_paths.txt'
try: fn=filepath(new_file); data = open(fn,'w')
except Exception:
new_dir = parent_dir+'OBO/builds'; fn = filepath(new_dir)
os.mkdir(fn) ###Re-Create directory if deleted
fn=filepath(new_file); data = open(fn,'w')
data.write('Path'+'\t'+'ontology_id'+'\n')
for path in path_ontology_db:
ontology_id = path_ontology_db[path]; path = pathToString(path)
data.write(path +'\t'+ ontology_id +'\n')
data.close()
new_file = parent_dir+'OBO/builds/'+ontology_type+'_annotations.txt'
fn=filepath(new_file); data = open(fn,'w')
data.write('ontology_id'+'\t'+'Ontology Name'+'\t'+'Ontology Type'+'\n')
for ontology_id in ontology_annotations:
s = ontology_annotations[ontology_id]
data.write(ontology_id +'\t'+ s.OntologyTerm() +'\t'+ s.OntologyType() +'\n')
data.close()
def convertStrListToIntList(path):
path_int=[]
for str in path: path_int.append(int(str))
return path_int
def importPreviousOntologyAnnotations(target_ontology_type):
ontology_annotations={}
program_type,database_dir = unique.whatProgramIsThis(); parent_dir = ''
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite/'
if target_ontology_type == 'GeneOntology': target_ontology_type = 'go'
filename = parent_dir+'OBO/builds/'+target_ontology_type+'_annotations.txt'; fn=filepath(filename); x=0
for line in open(fn,'r').xreadlines():
if x==0: x=1 ###Skip the title line
else:
data = cleanUpLine(line)
ontology_id,ontology_name,ontology_type = string.split(data,'\t')
if ':' not in ontology_id: ontology_id = 'GO:'+ontology_id
if ontology_name[0]== ' ': ontology_name = ontology_name[1:]
s = OntologyTree(ontology_id,ontology_name,ontology_type)
ontology_annotations[ontology_id] = s
return ontology_annotations
def importPreviousOntologyBuild(ontology_type,display=True):
program_type,database_dir = unique.whatProgramIsThis(); parent_dir = ''
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite/'
if ontology_type == 'GeneOntology': ontology_type = 'go'
filename = parent_dir+'OBO/builds/built_'+ontology_type+'_paths.txt'; fn=filepath(filename); x=0; count=0
for line in open(fn,'r').xreadlines(): count+=1
original_increment = int(count/10); increment = original_increment
try: ### This reduces run-time for the typical analysis where the databases are in sync and up-to-date
if run_mappfinder == 'yes':
if verified_nested == 'no':
build_nestedDB='yes'
else: build_nestedDB = 'no'
else: build_nestedDB = 'no'
except Exception: build_nestedDB = 'yes'
for line in open(fn,'r').xreadlines():
if x==0: x+=1 ###Skip the title line
else:
x+=1
if x == increment and display: increment+=original_increment; print '*',
data = cleanUpLine(line)
path,ontology_id = string.split(data,'\t')
path = tuple(map(int,string.split(path,'.')))
#path = string.split(path_str,'.'); path = convertStrListToIntList(path); path = tuple(path)
#s = OntologyPath(ontology_id,'','','',path,''); s = OntologyPathAbr(ontology_id,path)
if ':' not in ontology_id: ontology_id = 'GO:'+ontology_id
path_ontology_db[path] = ontology_id
try: built_ontology_paths[ontology_id].append(path)
except KeyError: built_ontology_paths[ontology_id] = [path]
if build_nestedDB == 'yes':
path_dictionary[path]=[path]
###All of the paths need to be added before the nested index below is built
if build_nestedDB == 'yes':
for path in path_dictionary:
###Build nested Path-index
path_len = len(path); i=-1
while path_len+i > 0:
parent_path = path[:i]
try: path_dictionary[parent_path].append(path)
except Exception: null=[]
i-=1
#### Import gene data and associate with Nested Ontology
def grabNestedOntologyIDs():
nested_ontology_tree={}
for path in path_dictionary:
parent_ontology_id = path_ontology_db[path]
child_ontology_list=[]
for child_path in path_dictionary[path]:
child_ontology_id = path_ontology_db[child_path]; child_ontology_list.append(child_ontology_id)
child_ontology_list = unique.unique(child_ontology_list)
nested_ontology_tree[parent_ontology_id] = child_ontology_list
return nested_ontology_tree
def linkGenesToNestedOntology(ontology_to_gene):
nested_ontology_genes={}; made_unique={}; x=0
original_increment = int(len(nested_ontology_tree)/10); increment = original_increment
for parent_ontology_id in nested_ontology_tree:
x+=1
if x == increment: increment+=original_increment; print '*',
for child_ontology_id in nested_ontology_tree[parent_ontology_id]: ### This list of ontology_ids includes the parent, since it is the first entry in path_dictionary
if child_ontology_id in ontology_to_gene:
ensembls=ontology_to_gene[child_ontology_id]
for ensembl in ensembls:
try:
nested_ontology_genes[parent_ontology_id][ensembl] = ''
except KeyError:
nested_ontology_genes[parent_ontology_id] = {ensembl:''}
return nested_ontology_genes
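### Illustrative sketch (not part of the original module): given a nested tree where a
### parent term lists itself and all of its descendants, genes annotated anywhere in the
### subtree roll up to the parent. The term and gene IDs below are hypothetical.
def _demo_linkGenesToNestedOntology():
global nested_ontology_tree
nested_ontology_tree = {'GO:1':['GO:1','GO:2']}
print linkGenesToNestedOntology({'GO:2':['ENSG00000000001']}) ### {'GO:1': {'ENSG00000000001': ''}}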
def exportVersionData(version,version_date,dir):
### Used by the module UI
program_type,database_dir = unique.whatProgramIsThis(); parent_dir = ''
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite/'
elif 'OBO' in dir or 'Config' in dir: parent_dir = ''
else: parent_dir = database_dir
dir = parent_dir+dir
global current_version; current_version = version
global current_version_date; current_version_date = version_date
new_file = dir+'version.txt'
data = export.ExportFile(new_file)
data.write(str(version)+'\t'+str(version_date)+'\n'); data.close()
def exportOntologyRelationships(nested_ontology_gene,gene_to_source_id,mod,source_type,ontology_type):
program_type,database_dir = unique.whatProgramIsThis()
if ontology_type == 'GeneOntology': ontology_type = 'GO'
new_file = database_dir+'/'+species_code+'/nested/'+mod+'_to_Nested-'+ontology_type+'.txt'
data = export.ExportFile(new_file)
title = [mod,'ontology_id']; title_str = string.join(title,'\t')
data.write(title_str+'\n')
for ontology_id in nested_ontology_gene:
for gene in nested_ontology_gene[ontology_id]:
output_list = [gene,ontology_id]
output_str = string.join(output_list,'\t')
data.write(output_str+'\n')
data.close()
print new_file, 'saved to disk'
#### Main functions that grab data from above functions
def remoteImportOntologyTree(ontology_type):
global built_ontology_paths; global path_ontology_db; global path_dictionary
built_ontology_paths={}; path_ontology_db={}; path_dictionary={}
importPreviousOntologyBuild(ontology_type)
return built_ontology_paths, path_ontology_db, path_dictionary
def buildNestedOntologyTree(mappfinder,display=True):
program_type,database_dir = unique.whatProgramIsThis(); parent_dir = ''
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite/'
global run_mappfinder; run_mappfinder = mappfinder
###Import all the OBO Ontology tree information from http://www.geneontology.org/
import_dir = '/'+parent_dir+'OBO'; global Ontology_version; path=[]; rank=0
c = GrabFiles(); c.setdirectory(import_dir)
file_dirs = c.searchdirectory('.ontology')
file_dirs += c.searchdirectory('.obo')
file_dirs.reverse()
x = file_dirs[1:]+file_dirs[0:1] ###Reorganize to mimic GenMAPP order (note: this reordered list is not currently used below)
start_time = time.time()
ontology_type = ''
#print file_dirs
for file_dir in file_dirs:
try:
if '.obo' in file_dir or '.ontology' in file_dir:
if 'gene_ontology' in file_dir or 'goslim' in file_dir:
ontology_type = 'GeneOntology'
if 'goslim' in file_dir: ontology_type = 'GOSlim'
###Import the 3 main Ontology files and index them so that the first path corresponds to the Ontology type - Software checks the date before parsing
path_ontology_db,built_ontology_paths,ontology_annotations,path_dictionary,path,rank = importOBONew(file_dir,path,'biological_process',rank)
try: path_ontology_db,built_ontology_paths,ontology_annotations,path_dictionary,path,rank = importOBONew(file_dir,path,'molecular_function',rank)
except Exception: null=[] ### Sometimes missing from GO-Slim
path_ontology_db,built_ontology_paths,ontology_annotations,path_dictionary,path,rank = importOBONew(file_dir,path,'cellular_component',rank)
else:
ontology_type = getOntologyType(file_dir)
path_ontology_db,built_ontology_paths,ontology_annotations,path_dictionary,path,rank = importOBONew(file_dir,path,'',rank)
deleteNestedOntologyFiles(ontology_type) ### Necessary to trigger an update for all species
else:
if display: print 'The ontology format present in',file_dir,'is no longer supported.'
exportCurrentOntologyBuild(path_ontology_db,ontology_annotations,ontology_type,display=display)
except Exception:
pass ### If an Ontology file fails download, it still may create an empty file that will screw up the processing of other obo files - just skip it
end_time = time.time(); time_diff = int(end_time-start_time)
if display: print "Ontology categories imported and nested in %d seconds" % time_diff
def getOntologyType(file_dir):
ontology_type = string.split(file_dir,'/')[-1]
if '_' in ontology_type:
ontology_type = string.split(ontology_type,'_')[0]+'Ontology'
else:
ontology_type = string.split(ontology_type,'.')[0]+'Ontology'
return ontology_type
def deleteNestedOntologyFiles(ontology_type):
program_type,database_dir = unique.whatProgramIsThis()
current_species_dirs = unique.read_directory('/'+database_dir)
for species_code in current_species_dirs:
c = GrabFiles(); c.setdirectory('/'+database_dir+'/'+species_code+'/nested')
if ontology_type == 'GeneOntology': ontology_type = 'GO'
file_dirs = c.searchdirectory('-'+ontology_type) ### list all nested files referencing the Ontology type
for file in file_dirs:
try: os.remove(filepath(database_dir+'/'+species_code+'/nested/'+file))
except Exception: null=[]
def verifyFileLength(filename):
count = 0
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>3: break
except Exception: null=[]
return count
def verifyNestedFileCreation(species,mod_types,ontology_type):
### Determine which mods are present for Ontology
program_type,database_dir = unique.whatProgramIsThis()
mods_present = []; nested_present=[]; verified = 'no'
for mod in mod_types:
ontology_file = database_dir+'/'+species+'/gene-go/'+mod+'-'+ontology_type+'.txt'
count = verifyFileLength(ontology_file) ### See if there are lines present in the file (if present)
if count>1: mods_present.append(mod)
if len(mods_present)>0:
for mod in mods_present:
if ontology_type == 'GeneOntology': ontology_type = 'GO'
ontology_file = database_dir+'/'+species+'/nested/'+mod+'_to_Nested-'+ontology_type+'.txt'
count = verifyFileLength(ontology_file) ### See if there are lines present in the file (if present)
if count>1: nested_present.append(mod)
if len(nested_present) == len(mods_present): verified = 'yes'
return verified
def findAvailableOntologies(species,mod_types):
program_type,database_dir = unique.whatProgramIsThis()
c = GrabFiles(); c.setdirectory('/'+database_dir+'/'+species+'/gene-go'); file_dirs=[]
for mod in mod_types:
file_dirs+= c.searchdirectory(mod+'-')
available_ontologies=[]
for filedir in file_dirs:
ontology_type = string.split(filedir,'-')[-1][:-4] ### remove the .txt
available_ontologies.append(ontology_type)
available_ontologies = unique.unique(available_ontologies)
return available_ontologies
def moveOntologyToArchiveDir(display=True):
### Move any existing OBO files to an archived directory as to not combine new with old annotations
program_type,database_dir = unique.whatProgramIsThis(); parent_dir = ''
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite/'
c = GrabFiles()
c.setdirectory('/'+parent_dir+'OBO')
file_dirs = c.searchdirectory('.ontology')+c.searchdirectory('.obo')
for file_dir in file_dirs:
new_file_dir = string.replace(file_dir,parent_dir+'OBO/',parent_dir+'OBO/archive/')
if display: print 'Moving:',file_dir,'to:',new_file_dir
export.customFileMove(file_dir,new_file_dir)
if len(file_dirs)==0:
c.setdirectory('/'+'OBO')
file_dirs = c.searchdirectory('.ontology')+c.searchdirectory('.obo')
for file_dir in file_dirs:
new_file_dir = string.replace(file_dir,'OBO/',parent_dir+'OBO/')
if display: print 'Moving:',file_dir,'to:',new_file_dir
export.customFileMove(file_dir,new_file_dir)
def buildNestedOntologyAssociations(species,mod_types,target_ontology_type,display=True):
global species_code; species_code = species; global verified_nested
global path_dictionary; path_dictionary={}
global built_ontology_paths; built_ontology_paths={}
global ontology_annotations; ontology_annotations={}
global path_ontology_db; path_ontology_db={}
if ('Linux' in platform.system()): mappfinder_db_input_dir = species_code+'/nested/'
else: mappfinder_db_input_dir = '/'+species_code+'/nested/'
buildNestedOntologyTree('yes') ### Checks the OBO directory to process new ontology files (if there)
moveOntologyToArchiveDir(display=display) ### Move any newly read ontology files to the archive directory
available_ontologies = findAvailableOntologies(species,mod_types)
verified_nested_db={}
for ontology_type in available_ontologies:
### This module verifies that the nested files are present (no longer considers database versions)
verified_nested = verifyNestedFileCreation(species,mod_types,ontology_type)
verified_nested_db[ontology_type] = verified_nested
verified_nested = verified_nested_db[target_ontology_type]
importPreviousOntologyBuild(target_ontology_type,display=display) ### populates the global variables we return below
if verified_nested == 'no': ### modified this code such that any version change warrants a rebuild and if reset by BuildEntrezAffymetrixAssociations or other, that it triggers a rebuild
if display: print 'Building %s Ontology nested gene association files for %s' % (target_ontology_type,species_code)
###Build Gene to Ontology associations for all MODs and export these for re-import by the MAPPFinder module
global nested_ontology_tree
nested_ontology_tree = grabNestedOntologyIDs()
for mod in mod_types:
try:
start_time = time.time()
mod_to_ontology = gene_associations.importGeneToOntologyData(species_code,mod,'null',target_ontology_type)
ontology_to_mod = swapKeyValues(mod_to_ontology); total_gene_count = len(mod_to_ontology); mod_to_ontology=[]
###Obtain a database of ontology_ids with all nested gene associations
nested_ontology_mod = linkGenesToNestedOntology(ontology_to_mod)
exportOntologyRelationships(nested_ontology_mod,{},mod,'',target_ontology_type)
end_time = time.time(); time_diff = int(end_time-start_time)
if display: print "Ontology Nested Lists Process/Created in %d seconds" % time_diff
except Exception:
if mod != 'HMDB':
pass ### optionally indicate if a MOD doesn't have local files supporting the creation of a nested set
#print mod, 'associated files not present!'
return built_ontology_paths, path_ontology_db, path_dictionary
def speciesData():
program_type,database_dir = unique.whatProgramIsThis()
filename = 'Config/species.txt'
fn=filepath(filename); global species_list; species_list=[]; global species_codes; species_codes={}
for line in open(fn,'r').readlines():
data = cleanUpLine(line)
abrev,species = string.split(data,'\t')
species_list.append(species)
species_codes[species] = abrev
def sourceData():
program_type,database_dir = unique.whatProgramIsThis()
filename = 'Config/source_data.txt'
fn=filepath(filename)
global source_types; source_types=[]
global system_codes; system_codes={}
global mod_types; mod_types=[]
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t'); source=t[0]
try: system_code=t[1]
except IndexError: system_code = 'NuLL'
if len(t)>2: ### Therefore, this ID system is a potential MOD
if t[2] == 'MOD': mod_types.append(source)
if source not in mod_types: source_types.append(source)
system_codes[system_code] = source ###Used when users include system code data in their input file
if __name__ == '__main__':
"""This module imports Ontology hierarchy data, nests it, outputs it to GO-Elite and associates
gene level data with nested Ontology terms for MAPPFinder"""
species_code = 'Hs'; mod_types = ['Ensembl','EntrezGene']; ontology_type = 'MPhenoOntology'
buildNestedOntologyAssociations(species_code,mod_types,ontology_type); sys.exit()
#!/usr/bin/python
###########################
#Program: GO-elite.py
#Author: Nathan Salomonis
#Date: 12/12/06
#Website: http://www.genmapp.org
#Email: [email protected]
###########################
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/OBO_import.py
|
OBO_import.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os
import math
import traceback
try: from stats_scripts import statistics
except Exception: pass
def makeTestFile():
all_data = [['name','harold','bob','frank','sally','kim','tim'],
['a','0','0','1','2','0','5'],['b','0','0','1','2','0','5'],
['c','0','0','1','2','0','5'],['d','0','0','1','2','0','5']]
input_file = 'test.txt'
export_object = open(input_file,'w')
for i in all_data:
export_object.write(string.join(i,'\t')+'\n')
export_object.close()
return input_file
def filterFile(input_file,output_file,filter_names,force=False,calculateCentroids=False,comparisons=[],log2=False):
if calculateCentroids:
filter_names,group_index_db=filter_names
export_object = open(output_file,'w')
firstLine = True
row_count=0
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
if '.csv' in input_file:
values = string.split(data,',')
else:
values = string.split(data,'\t')
row_count+=1
if firstLine:
uid_index = 0
if data[0]!='#':
if force == True:
values2=[]
for x in values:
if ':' in x:
x=string.split(x,':')[1]
values2.append(x)
else:
values2.append(x)
filter_names2=[]
for f in filter_names:
if f in values: filter_names2.append(f)
if len(filter_names2)<2:
filter_names2=[]
for f in filter_names:
if f in values2: filter_names2.append(f)
filter_names = filter_names2
else:
filter_names = filter_names2
if force == 'include':
values= ['UID']+filter_names
pass
try:
sample_index_list = map(lambda x: values.index(x), filter_names)
except:
### If ":" in header name
if ':' in line:
values2=[]
for x in values:
if ':' in x:
x=string.split(x,':')[1]
values2.append(x)
values = values2
sample_index_list = map(lambda x: values.index(x), filter_names)
elif '.' in line:
values2=[]
for x in values:
if '.' in x:
x=string.split(x,'.')[0]
values2.append(x)
values = values2
sample_index_list = map(lambda x: values.index(x), filter_names)
elif '.$' in line:
filter_names2=[]
for f in filter_names: ### if the name in the filter is a string within the input data-file
for f1 in values:
if f in f1:
filter_names2.append(f1) ### change to the reference name
break
print len(filter_names2), len(values), len(filter_names);kill
filter_names = filter_names2
#filter_names = map(lambda x: string.split(x,'.')[0], filter_names)
#values = map(lambda x: string.split(x,'.')[0], values)
sample_index_list = map(lambda x: values.index(x), filter_names)
else:
temp_count=1
for x in filter_names:
if x not in values:
temp_count+=1
if temp_count==500: print 'too many to print'
elif temp_count>500:
pass
else: print x,
print temp_count,'are missing';kill
firstLine = False
header = values
if 'PSI_EventAnnotation' in input_file:
uid_index = values.index('UID')
if log2:
try:
values = values[:1] + map(lambda x: str(math.log(float(x)+1,2)), values[1:]) ### the iterable argument was missing from the original map call
except:
pass
if calculateCentroids:
if len(comparisons)>0:
export_object.write(string.join(['UID']+map(lambda x: x[0]+'-fold',comparisons),'\t')+'\n') ### Use the numerator group name
else:
clusters = map(str,group_index_db)
export_object.write(string.join([values[uid_index]]+clusters,'\t')+'\n')
continue ### skip the below code
if force == 'include':
if row_count>1:
values += ['0']
try: filtered_values = map(lambda x: values[x], sample_index_list) ### simple and fast way to reorganize the samples
except Exception:
print traceback.format_exc()
print len(values), len(sample_index_list)
print input_file, len(filter_names)
for i in filter_names:
if i not in header:
print i, 'not found'
sys.exit()
sys.exit()
### For PSI files with missing values at the end of each line, often
if len(header) != len(values):
diff = len(header)-len(values)
values+=diff*['']
filtered_values = map(lambda x: values[x], sample_index_list) ### simple and fast way to reorganize the samples
#print values[0]; print sample_index_list; print values; print len(values); print len(prior_values);kill
prior_values=values
######################## Begin Centroid Calculation ########################
if calculateCentroids:
mean_matrix=[]
means={}
for cluster in group_index_db:
#### group_index_db[cluster] is all of the indeces for samples in a noted group, cluster is the actual cluster name (not number)
try: mean=statistics.avg(map(lambda x: float(filtered_values[x]), group_index_db[cluster]))
except:
continue
#mean = map(lambda x: filtered_values[uid][x], group_index_db[cluster]) ### Only one value
means[cluster]=mean
mean_matrix.append(str(mean))
filtered_values = mean_matrix
if len(comparisons)>0:
fold_matrix=[]
for (group2, group1) in comparisons:
fold = means[group2]-means[group1]
fold_matrix.append(str(fold))
filtered_values = fold_matrix
######################## End Centroid Calculation ########################
export_object.write(string.join([values[uid_index]]+filtered_values,'\t')+'\n')
export_object.close()
print 'Filtered columns printed to:',output_file
return output_file
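### Illustrative sketch (not part of the original module): a minimal filterFile call that
### keeps two columns from a tab-delimited matrix, using the test file written by
### makeTestFile above. The output file name is hypothetical.
def _demo_filterFile():
input_file = makeTestFile() ### writes test.txt with columns name,harold,bob,frank,sally,kim,tim
filterFile(input_file,'test-filtered.txt',['harold','sally']) ### retains only the harold and sally columns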
def filterRows(input_file,output_file,filterDB=None,logData=False,exclude=False):
export_object = open(output_file,'w')
firstLine = True
uid_index=0
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
values = string.split(data,'\t')
if 'PSI_EventAnnotation' in input_file and firstLine:
uid_index = values.index('UID')
if firstLine:
try:uid_index=values.index('UID')
except Exception:
try: uid_index=values.index('uid')
except Exception: uid_index = 0
firstLine = False
export_object.write(line)
else:
if filterDB!=None:
if values[uid_index] in filterDB:
if logData:
line = string.join([values[0]]+map(str,(map(lambda x: math.log(float(x)+1,2),values[1:]))),'\t')+'\n'
if exclude==False:
export_object.write(line)
elif exclude: ### Only write out the entries NOT in the filter list
export_object.write(line)
else:
max_val = max(map(float,values[1:]))
#min_val = min(map(float,values[1:]))
#if max_val>0.1:
if max_val < 0.1:
export_object.write(line)
export_object.close()
print 'Filtered rows printed to:',output_file
def getFiltersFromHeatmap(filter_file):
import collections
alt_filter_list=None
group_index_db = collections.OrderedDict()
index=0
for line in open(filter_file,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if t[1] == 'row_clusters-flat':
filter_list = string.split(data,'\t')[2:]
if ':' in data:
alt_filter_list = map(lambda x: string.split(x,":")[1],string.split(data,'\t')[2:])
elif t[0] == 'column_clusters-flat':
cluster_list = string.split(data,'\t')[2:]
if 'NA' in cluster_list: ### When MarkerFinder notated groups
sample_names = map(lambda x: string.split(x,":")[1],filter_list)
cluster_list = map(lambda x: string.split(x,":")[0],filter_list)
filter_list = sample_names
elif alt_filter_list != None: ### When undesired groups notated in the sample names
filter_list = alt_filter_list
index=0
for sample in filter_list:
cluster=cluster_list[index]
try: group_index_db[cluster].append(index)
except Exception: group_index_db[cluster] = [index]
index+=1
return filter_list, group_index_db
def getComparisons(filter_file):
"""Import group comparisons when calculating fold changes"""
groups={}
for line in open(filter_file,'rU').xreadlines():
data = cleanUpLine(line)
sample,group_num,group_name = string.split(data,'\t')
groups[group_num]=group_name
comparisons=[]
comparison_file = string.replace(filter_file,'groups.','comps.')
for line in open(comparison_file,'rU').xreadlines():
data = cleanUpLine(line)
group2,group1 = string.split(data,'\t')
group2 = groups[group2]
group1 = groups[group1]
comparisons.append([group2,group1])
return comparisons
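### Illustrative sketch (not part of the original module): getComparisons assumes a
### 'groups.' file with sample, group-number and group-name columns plus a parallel
### 'comps.' file listing numerator/denominator group numbers (hypothetical contents):
### groups.MyStudy.txt: sample1<TAB>1<TAB>WT / sample2<TAB>2<TAB>KO
### comps.MyStudy.txt: 2<TAB>1
### yielding comparisons = [['KO','WT']], i.e. fold changes reported as KO minus WT.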
def getFilters(filter_file,calculateCentroids=False):
"""Import sample list for filtering and optionally sample to groups """
filter_list=[]
if calculateCentroids:
import collections
group_index_db = collections.OrderedDict()
index=0
for line in open(filter_file,'rU').xreadlines():
data = cleanUpLine(line)
sample = string.split(data,'\t')[0]
filter_list.append(sample)
if calculateCentroids:
sample,group_num,group_name = string.split(data,'\t')
try: group_index_db[group_name].append(index)
except Exception: group_index_db[group_name] = [index]
index+=1
if calculateCentroids:
return filter_list,group_index_db
else:
return filter_list
""" Filter a dataset based on number of genes with expression above the indicated threshold """
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
#https://stackoverflow.com/questions/36598136/remove-all-hex-characters-from-string-in-python
try: data = data.decode('utf8').encode('ascii', errors='ignore') ### get rid of bad quotes
except:
print data
return data
def statisticallyFilterTransposedFile(input_file,output_file,threshold,minGeneCutoff=499,binarize=True):
""" The input file is a large expression matrix with the rows as cells and the columns as genes to filter """
if 'exp.' in input_file:
counts_file = string.replace(input_file,'exp.','geneCount.')
else:
counts_file = input_file[:-4]+'-geneCount.txt'
import export
eo = export.ExportFile(counts_file)
eo.write('Sample\tGenes Expressed(threshold:'+str(threshold)+')\n')
eo_full = export.ExportFile(output_file)
sample_expressed_genes={}
header=True
count_sum_array=[]
cells_retained=0
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
if '.csv' in input_file:
t = string.split(data,',')
else:
t = string.split(data,'\t')
if header:
eo_full.write(line)
gene_len = len(t)
genes = t[1:]
header=False
else:
cell = t[0]
values = map(float,t[1:])
binarized_values = []
for v in values:
if v>threshold:
if binarize: ### do not count the individual read counts, only if a gene is expressed or not
binarized_values.append(1)
else:
binarized_values.append(v) ### When summarizing counts and not genes expressed
else: binarized_values.append(0)
genes_expressed = sum(binarized_values)
if genes_expressed>minGeneCutoff:
eo_full.write(line)
cells_retained+=1
eo.write(cell+'\t'+str(genes_expressed)+'\n')
eo.close()
eo_full.close()
print cells_retained, 'Cells with genes expressed above the threshold'
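### Usage sketch (illustrative): for a cells-x-genes matrix, retain only cells
### with more than minGeneCutoff genes detected above `threshold`; per-cell
### gene counts are written to a parallel geneCount. file.
#   statisticallyFilterTransposedFile('exp.demo.txt','exp.demo-filtered.txt',1,minGeneCutoff=499)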
def statisticallyFilterFile(input_file,output_file,threshold,minGeneCutoff=499,binarize=True):
if 'exp.' in input_file:
counts_file = string.replace(input_file,'exp.','geneCount.')
else:
counts_file = input_file[:-4]+'-geneCount.txt'
sample_expressed_genes={}
header=True
junction_max=[]
count_sum_array=[]
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
if '.csv' in input_file:
t = string.split(data,',')
else:
t = string.split(data,'\t')
if header:
header_len = len(t)
full_header = t
samples = t[1:]
header=False
count_sum_array=[0]*len(samples)
else:
if len(t)==(header_len+1):
### Correct header with a missing UID column
samples = full_header
count_sum_array=[0]*len(samples)
print 'fixing bad header'
try: values = map(float,t[1:])
except Exception:
if 'NA' in t[1:]:
tn = [0 if x=='NA' else x for x in t[1:]] ### Replace NAs
values = map(float,tn)
else:
tn = [0 if x=='' else x for x in t[1:]] ### Replace empty values
values = map(float,tn)
binarized_values = []
for v in values:
if v>threshold:
if binarize:
binarized_values.append(1)
else:
binarized_values.append(v) ### When summarizing counts and not genes expressed
else: binarized_values.append(0)
count_sum_array = [sum(value) for value in zip(*[count_sum_array,binarized_values])]
index=0
distribution=[]
count_sum_array_db={}
samples_to_retain =[]
samples_to_exclude = []
for sample in samples:
count_sum_array_db[sample] = count_sum_array[index]
distribution.append(count_sum_array[index])
index+=1
from stats_scripts import statistics
distribution.sort()
avg = int(statistics.avg(distribution))
stdev = int(statistics.stdev(distribution))
min_exp = int(min(distribution))
cutoff = avg - (stdev*2)
dev = 2
print 'The average number of genes expressed above %s is %s, (SD is %s, min is %s)' % (threshold,avg,stdev,min_exp)
if cutoff<0:
if (stdev-avg)>0:
cutoff = avg - (stdev/2); dev = 0.5
print cutoff, 'genes expressed selected as a default cutoff to include cells (0.5-stdev away)'
else:
cutoff = avg - stdev; dev = 1
print cutoff, 'genes expressed selected as a default cutoff to include cells (1-stdev away)'
if min_exp>cutoff:
cutoff = avg - stdev; dev = 1
print 'Using a default cutoff of >=500 genes expressed per cell'
import export
eo = export.ExportFile(counts_file)
eo.write('Sample\tGenes Expressed(threshold:'+str(threshold)+')\n')
for sample in samples: ### keep the original order
if count_sum_array_db[sample]>minGeneCutoff:
samples_to_retain.append(sample)
else:
samples_to_exclude.append(sample)
eo.write(sample+'\t'+str(count_sum_array_db[sample])+'\n')
if len(samples_to_retain)<4: ### Don't remove any if too few samples
samples_to_retain+=samples_to_exclude
else:
print len(samples_to_exclude), 'samples removed (< %d genes expressed)' % minGeneCutoff
eo.close()
print 'Exporting the filtered expression file to:'
print output_file
filterFile(input_file,output_file,samples_to_retain)
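### Usage sketch (illustrative): the genes-x-samples counterpart. The distribution
### statistics above are reported for reference, but sample retention is decided
### by minGeneCutoff, and the surviving columns are written out via filterFile().
#   statisticallyFilterFile('exp.demo.txt','exp.demo-filtered.txt',1,minGeneCutoff=499)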
def transposeMatrix(input_file):
arrays=[]
import export
eo = export.ExportFile(input_file[:-4]+'-transposed.txt')
for line in open(input_file,'rU').xreadlines():
data = cleanUpLine(line)
values = string.split(data,'\t')
arrays.append(values)
t_arrays = zip(*arrays)
for t in t_arrays:
eo.write(string.join(t,'\t')+'\n')
eo.close()
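### Usage sketch (illustrative): writes <input>-transposed.txt with rows and
### columns swapped. Note that zip(*arrays) truncates to the shortest row, so
### ragged input lines will silently drop trailing columns.
#   transposeMatrix('/path/exp.demo.txt')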
if __name__ == '__main__':
################ Command-line arguments ################
import getopt
filter_rows=False
filter_file=None
force=False
exclude = False
calculateCentroids=False
geneCountFilter=False
expressionCutoff=1
returnComparisons=False
comparisons=[]
binarize=True
transpose=False
log2=False
fileFormat = 'columns'
minGeneCutoff = 199 ### default; can be overridden with --minGeneCutoff
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
filter_names = ['test-1','test-2','test-3']
input_file = makeTestFile()
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','f=','r=','median=','medoid=', 'fold=', 'folds=',
'centroid=','force=','minGeneCutoff=','expressionCutoff=','geneCountFilter=', 'binarize=',
'transpose=','fileFormat=','log2='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--i': input_file=arg
elif opt == '--transpose': transpose = True
elif opt == '--f': filter_file=arg
elif opt == '--median' or opt=='--medoid' or opt=='--centroid': calculateCentroids = True
elif opt == '--fold': returnComparisons = True
elif opt == '--log2': log2 = True
elif opt == '--r':
if arg == 'exclude':
filter_rows=True
exclude=True
else:
filter_rows=True
elif opt == '--force':
if arg == 'include': force = arg
else: force=True
elif opt == '--geneCountFilter': geneCountFilter=True
elif opt == '--expressionCutoff': expressionCutoff=float(arg)
elif opt == '--minGeneCutoff': minGeneCutoff=int(arg)
elif opt == '--binarize':
if 'alse' in arg or 'no' in arg:
binarize=False
elif opt == '--fileFormat':
fileFormat=arg
if fileFormat != 'columns':
fileFormat = 'rows'
output_file = input_file[:-4]+'-filtered.txt'
if transpose:
transposeMatrix(input_file)
sys.exit()
if geneCountFilter:
if fileFormat == 'columns':
statisticallyFilterFile(input_file,input_file[:-4]+'-OutlierRemoved.txt',expressionCutoff,minGeneCutoff=minGeneCutoff,binarize=binarize); sys.exit()
else:
statisticallyFilterTransposedFile(input_file,input_file[:-4]+'-OutlierRemoved.txt',expressionCutoff,minGeneCutoff=minGeneCutoff,binarize=binarize); sys.exit()
if filter_rows:
filter_names = getFilters(filter_file)
filterRows(input_file,output_file,filterDB=filter_names,logData=False,exclude=exclude)
elif calculateCentroids:
output_file = input_file[:-4]+'-mean.txt'
if returnComparisons:
comparisons = getComparisons(filter_file)
output_file = input_file[:-4]+'-fold.txt'
try: filter_names,group_index_db = getFilters(filter_file,calculateCentroids=calculateCentroids)
except Exception:
print traceback.format_exc()
filter_names,group_index_db = getFiltersFromHeatmap(filter_file)
filterFile(input_file,output_file,(filter_names,group_index_db),force=force,calculateCentroids=calculateCentroids,comparisons=comparisons)
else:
filter_names = getFilters(filter_file)
filterFile(input_file,output_file,filter_names,force=force,log2=log2)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/sampleIndexSelection.py
|
sampleIndexSelection.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains methods for reading Affymetrix formatted CSV annotations files
from http://www.affymetrix.com, extracting out various direct and inferred gene relationships,
downloading, integrating and inferring WikiPathway gene relationships and downloading and
extracting EntrezGene-Gene Ontology relationships from NCBI."""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import datetime
import export
import update
import gene_associations
try: from import_scripts import OBO_import
except Exception: import OBO_import
################# Parse directory files
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
try:
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv" or entry[-4:] == ".TXT" or entry[-4:] == ".tab" or '.zip' in entry:
dir_list2.append(entry)
except Exception:
#print sub_dir, "NOT FOUND!!!!"
dir_list2=[]
return dir_list2
def returnDirectories(sub_dir):
dir_list = unique.returnDirectories(sub_dir)
###Below code used to prevent folder names from being included
dir_list2 = []
for i in dir_list:
if "." not in i: dir_list2.append(i)
return dir_list2
def cleanUpLine(line):
data = string.replace(line,'\n','')
data = string.replace(data,'\c','')
data = string.replace(data,'\r','')
data = string.replace(data,'"','')
return data
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
file = getDirectoryFiles(self.data,str(search_term))
if len(file)<1: print search_term,'not found'
return file
def returndirectory(self):
dir_list = read_directory(self.data)
return dir_list
def getDirectoryFiles(import_dir, search_term):
exact_file = ''
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
affy_data_dir = import_dir[1:]+'/'+data
if search_term in affy_data_dir: exact_file = affy_data_dir
return exact_file
################# Import and Annotate Data
class AffymetrixInformation:
def __init__(self,probeset,symbol,ensembl,entrez,unigene,uniprot,description,goids,go_names,pathways):
self._probeset = probeset; self._ensembl = ensembl; self._uniprot = uniprot; self._description = description
self._entrez = entrez; self._symbol = symbol; self._unigene = unigene
self._goids = goids; self._go_names = go_names; self._pathways = pathways
def ArrayID(self): return self._probeset
def Description(self): return self._description
def Symbol(self): return self._symbol
def Ensembl(self): return self._ensembl
def EnsemblString(self):
ens_str = string.join(self._ensembl,'|')
return ens_str
def Entrez(self): return self._entrez
def EntrezString(self):
entrez_str = string.join(self._entrez,'|')
return entrez_str
def Unigene(self): return self._unigene
def UnigeneString(self):
unigene_str = string.join(self._unigene,'|')
return unigene_str
def Uniprot(self): return self._uniprot
def GOIDs(self): return self._goids
def GOProcessIDs(self): return self._goids[0]
def GOComponentIDs(self): return self._goids[1]
def GOFunctionIDs(self): return self._goids[2]
def GONameLists(self): return self._go_names
def GOProcessNames(self):
go_names = string.join(self._go_names[0],' // ')
return go_names
def GOComponentNames(self):
go_names = string.join(self._go_names[1],' // ')
return go_names
def GOFunctionNames(self):
go_names = string.join(self._go_names[2],' // ')
return go_names
def GONames(self):
go_names = self._go_names[0]+self._go_names[1]+self._go_names[2]
return go_names
def Pathways(self): return self._pathways
def PathwayInfo(self):
pathway_str = string.join(self.Pathways(),' // ')
return pathway_str
def GOPathwayInfo(self):
pathway_str = string.join(self.GONames() + self.Pathways(),' // ')
return pathway_str
def resetEnsembl(self,ensembl): self._ensembl = ensembl
def resetEntrez(self,entrez): self._entrez = entrez
def setSequence(self,seq): self.seq = seq
def setSpecies(self,species): self.species = species
def setCoordinates(self,coordinates): self.coordinates = coordinates
def Sequence(self): return self.seq
def Species(self): return self.species
def Coordinates(self): return self.coordinates
def ArrayValues(self):
output = self.Symbol()+'|'+self.ArrayID()
return output
def __repr__(self): return self.ArrayValues()
class InferredEntrezInformation:
def __init__(self,symbol,entrez,description):
self._entrez = entrez; self._symbol = symbol; self._description = description
def Entrez(self): return self._entrez
def Symbol(self): return self._symbol
def Description(self): return self._description
def DataValues(self):
output = self.Symbol()+'|'+self.Entrez()
return output
def __repr__(self): return self.DataValues()
def eliminate_redundant_dict_values(database):
db1={}
for key in database: value_list = unique.unique(database[key]); value_list.sort(); db1[key] = value_list
return db1
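### Illustrative example: eliminate_redundant_dict_values({'g1':['b','a','b']})
### returns {'g1':['a','b']} (values de-duplicated via unique.unique and sorted).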
def buildMODDbase():
mod_db={}
mod_db['Dr'] = 'FlyBase'
mod_db['Ce'] = 'WormBase'
mod_db['Mm'] = 'MGI Name'
mod_db['Rn'] = 'RGD Name'
mod_db['Sc'] = 'SGD accession number'
mod_db['At'] = 'AGI'
return mod_db
def verifyFile(filename):
fn=filepath(filename); file_found = 'yes'
try:
for line in open(fn,'rU').xreadlines():break
except Exception: file_found = 'no'
return file_found
######### Import New Data ##########
def parse_affymetrix_annotations(filename,species):
###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
temp_affy_db = {}; x=0; y=0
process_go_var = 'no' ### defined up-front so the per-line checks below never reference an undefined name
if process_go == 'yes':
eg_go_found = verifyFile('Databases/'+species+'/gene-go/EntrezGene-GeneOntology.txt')
ens_go_found = verifyFile('Databases/'+species+'/gene-go/Ensembl-GeneOntology.txt')
if eg_go_found == 'no' or ens_go_found == 'no': process_go_var = 'yes'
else: process_go_var = 'yes' ### GO files already present; still re-extracted when GO processing is requested
fn=filepath(filename); mod_db = buildMODDbase()
for line in open(fn,'r').readlines():
probeset_data = string.replace(line,'\n','') #remove endline
probeset_data = string.replace(probeset_data,'---','')
affy_data = string.split(probeset_data[1:-1],'","') #remove endline
try: mod_name = mod_db[species]
except KeyError: mod_name = 'YYYYYY' ###Something that should not be found
if x==0 and line[0]!='#':
x=1; affy_headers = affy_data
y = 0
while y < len(affy_headers): ### a single pass over the header row locates every needed column index
if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
if 'transcript_cluster_id' in affy_headers[y]: tc = y
if 'Gene Symbol' in affy_headers[y]: gs = y
if 'Ensembl' in affy_headers[y]: ens = y
if ('nigene' in affy_headers[y] or 'UniGene' in affy_headers[y]) and 'Cluster' not in affy_headers[y]: ug = y
if 'mrna_assignment' in affy_headers[y]: ma = y
if 'gene_assignment' in affy_headers[y]: ga = y
if 'Entrez' in affy_headers[y] or 'LocusLink' in affy_headers[y]: ll = y
if 'SwissProt' in affy_headers[y] or 'swissprot' in affy_headers[y]: sp = y
if 'Gene Title' in affy_headers[y]: gt = y
if 'rocess' in affy_headers[y]: bp = y
if 'omponent' in affy_headers[y]: cc = y
if 'unction' in affy_headers[y]: mf = y
if 'RefSeq Protein' in affy_headers[y]: rp = y
if 'RefSeq Transcript' in affy_headers[y]: rt = y
if 'athway' in affy_headers[y]: gp = y
### miRNA array specific
if 'Alignments' == affy_headers[y]: al = y
if 'Transcript ID(Array Design)' in affy_headers[y]: ti = y
if 'Sequence Type' in affy_headers[y]: st = y
if 'Sequence' == affy_headers[y]: sq = y
if 'Species Scientific Name' == affy_headers[y]: ss = y
if mod_name in affy_headers[y]: mn = y
y += 1
elif x == 1:
###If using the Affy 2.0 Annotation file structure, both probeset and transcript cluster IDs are present
###If transcript_cluster centric file (gene's only no probesets), then probeset = transcript_cluster
try:
transcript_cluster = affy_data[tc] ###Affy's unique Gene-ID
probeset = affy_data[ps]
if probeset != transcript_cluster: ###Occurs for transcript_cluster ID centered files
probesets = [probeset,transcript_cluster]
else: probesets = [probeset]
ps = tc; version = 2 ### Used to define where the non-UID data exists
except UnboundLocalError:
try: probesets = [affy_data[ps]]; uniprot = affy_data[sp]; version = 1
except Exception: probesets = [affy_data[ps]]; version = 3 ### Specific to miRNA arrays
try: uniprot = affy_data[sp]; unigene = affy_data[ug]; uniprot_list = string.split(uniprot,' /// ')
except Exception: uniprot=''; unigene=''; uniprot_list=[] ### Occurs with miRNA arrays or a sporadic parsing error, typically once or twice per file
symbol = ''; description = ''
try: pathway_data = affy_data[gp]
except Exception: pathway_data='' ### Occurs with miRNA arrays or a sporadic parsing error, typically once or twice per file
for probeset in probesets:
if version == 1: ###Applies to 3' biased arrays only (Conventional Format)
description = affy_data[gt]; symbol = affy_data[gs]; goa=''; entrez = affy_data[ll]
ensembl_list = []; ensembl = affy_data[ens]; ensembl_list = string.split(ensembl,' /// ')
entrez_list = string.split(entrez,' /// '); unigene_list = string.split(unigene,' /// ')
uniprot_list = string.split(uniprot,' /// '); symbol_list = string.split(symbol,' /// ')
try: mod = affy_data[mn]; mod_list = string.split(mod,' /// ')
except UnboundLocalError: mod = ''; mod_list = []
if len(symbol)<1 and len(mod)>0: symbol = mod ### For example, for At, use Tair if no symbol present
if len(mod_list)>3: mod_list=[]
ref_prot = affy_data[rp]; ref_prot_list = string.split(ref_prot,' /// ')
ref_tran = affy_data[rt]; ref_tran_list = string.split(ref_tran,' /// ')
###Process GO information if desired
if process_go_var == 'yes':
process = affy_data[bp]; component = affy_data[cc]; function = affy_data[mf]
process_goids, process_names = extractPathwayData(process,'GO',version)
component_goids, component_names = extractPathwayData(component,'GO',version)
function_goids, function_names = extractPathwayData(function,'GO',version)
goids = [process_goids,component_goids,function_goids]
go_names = [process_names,component_names,function_names]
else: goids=[]; go_names=[]
if extract_pathway_names == 'yes': null, pathways = extractPathwayData(pathway_data,'pathway',version)
else: pathways = []
ai = AffymetrixInformation(probeset, symbol, ensembl_list, entrez_list, unigene_list, uniprot_list, description, goids, go_names, pathways)
if len(entrez_list)<5: affy_annotation_db[probeset] = ai
if parse_wikipathways == 'yes':
if (len(entrez_list)<4 and len(entrez_list)>0) and (len(ensembl_list)<4 and len(ensembl_list)>0):
primary_list = entrez_list+ensembl_list
for primary in primary_list:
if len(primary)>0:
for gene in ensembl_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in ref_prot_list:
gene_data = string.split(gene,'.'); gene = gene_data[0]
if len(gene)>1: meta[primary,gene]=[]
for gene in ref_tran_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in unigene_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in mod_list:
if len(gene)>1: meta[primary,'mod:'+gene]=[]
for gene in symbol_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in uniprot_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in entrez_list:
if len(gene)>1: meta[primary,gene]=[]
#symbol_list = string.split(symbol,' /// '); description_list = string.split(description,' /// ')
if len(entrez_list)<2: ###Only store annotations for EntrezGene if there is only one listed ID, since the symbol, description and Entrez Gene are sorted alphabetically, not organized relative to each other (stupid)
iter = 0
for entrez in entrez_list:
#symbol = symbol_list[iter]; description = description_list[iter] ###grab the symbol that matches the EntrezGene entry
z = InferredEntrezInformation(symbol,entrez,description)
try: gene_annotation_db[entrez] = z
except NameError: null=[]
iter += 1
if len(ensembl_list)<2: ###Only store annotations for EntrezGene if there is only one listed ID, since the symbol, description and Entrez Gene are sorted alphabetically, not organized relative to each other (stupid)
for ensembl in ensembl_list:
z = InferredEntrezInformation(symbol,ensembl,description)
try: gene_annotation_db['ENS:'+ensembl] = z
except NameError: null=[]
elif version == 2: ### Applies to Exon, Transcript, and whole-genome Gene arrays.
uniprot_list2 = []
for uniprot_id in uniprot_list:
if len(uniprot_id)>0:
try: a = int(uniprot_id[1]); uniprot_list2.append(uniprot_id)
except ValueError: null = []
uniprot_list = uniprot_list2
ensembl_list=[]; descriptions=[]; refseq_list=[]; symbol_list=[]
try: mrna_associations = affy_data[ma]
except IndexError: mrna_associations='';
ensembl_data = string.split(mrna_associations,' /// ')
for entry in ensembl_data:
annotations = string.split(entry,' // ')
#if probeset == '8148358': print annotations
for i in annotations:
if 'gene:ENS' in i:
ensembl_id_data = string.split(i,'gene:ENS')
ensembl_ids = ensembl_id_data[1:]; description = ensembl_id_data[0] ###There can be multiple IDs
descriptions.append((len(description),description))
for ensembl_id in ensembl_ids:
ensembl_id = string.split(ensembl_id,' ')
ensembl_id = 'ENS'+ensembl_id[0]; ensembl_list.append(ensembl_id)
if 'NM_' in i:
refseq_id = string.replace(i,' ','')
refseq_list.append(refseq_id)
#if probeset == '8148358': print ensembl_list; kill
try: gene_assocs = affy_data[ga]; entrez_list=[]
except IndexError: gene_assocs=''; entrez_list=[]
entrez_data = string.split(gene_assocs,' /// ')
for entry in entrez_data:
try:
if len(entry)>0:
annotations = string.split(entry,' // ')
entrez_gene = int(annotations[-1]); entrez_list.append(str(entrez_gene))
symbol = annotations[1]; description = annotations[2]; descriptions.append((len(description),description))
symbol_list.append(symbol)
#print entrez_gene,symbol, descriptions;kill
z = InferredEntrezInformation(symbol,entrez_gene,description)
try: gene_annotation_db[str(entrez_gene)] = z ###create an inferred Entrez gene database
except NameError: null = []
except ValueError: null = []
if len(symbol_list) == 1 and len(ensembl_list)>0:
symbol = symbol_list[0]
for ensembl in ensembl_list:
z = InferredEntrezInformation(symbol,ensembl,description)
try: gene_annotation_db['ENS:'+ensembl] = z ###create an inferred Entrez gene database
except NameError: null = []
gene_assocs = string.replace(gene_assocs,'---','')
unigene_data = string.split(unigene,' /// '); unigene_list = []
for entry in unigene_data:
if len(entry)>0:
annotations = string.split(entry,' // ')
try: null = int(annotations[-2][3:]); unigene_list.append(annotations[-2])
except Exception: null = []
###Only applies to optional GOID inclusion
if parse_wikipathways == 'yes':
if (len(entrez_list)<4 and len(entrez_list)>0) and (len(ensembl_list)<4 and len(ensembl_list)>0):
primary_list = entrez_list+ensembl_list
for primary in primary_list:
if len(primary)>0:
for gene in ensembl_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in refseq_list:
gene_data = string.split(gene,'.'); gene = gene_data[0]
if len(gene)>1: meta[primary,gene]=[]
for gene in unigene_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in symbol_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in uniprot_list:
if len(gene)>1: meta[primary,gene]=[]
for gene in entrez_list:
if len(gene)>1: meta[primary,gene]=[]
if process_go_var == 'yes':
try: process = affy_data[bp]; component = affy_data[cc]; function = affy_data[mf]
except IndexError: process = ''; component=''; function='' ### Occurs due to a sporadic parsing error, typically once or twice per file
process_goids, process_names = extractPathwayData(process,'GO',version)
component_goids, component_names = extractPathwayData(component,'GO',version)
function_goids, function_names = extractPathwayData(function,'GO',version)
goids = [process_goids,component_goids,function_goids]
go_names = [process_names,component_names,function_names]
else: goids=[]; go_names=[]
if extract_pathway_names == 'yes': null, pathways = extractPathwayData(pathway_data,'pathway',version)
else: pathways = []
entrez_list=unique.unique(entrez_list); unigene_list=unique.unique(unigene_list); uniprot_list=unique.unique(uniprot_list); ensembl_list=unique.unique(ensembl_list)
descriptions2=[]
for i in descriptions:
if 'cdna:known' not in i: descriptions2.append(i)
descriptions = descriptions2
if len(descriptions)>0:
descriptions.sort(); description = descriptions[-1][1]
if description[0] == ' ': description = description[1:] ### some entries begin with a blank
ai = AffymetrixInformation(probeset,symbol,ensembl_list,entrez_list,unigene_list,uniprot_list,description,goids,go_names,pathways)
if len(entrez_list)<5 and len(ensembl_list)<5: affy_annotation_db[probeset] = ai
elif version == 3:
description = affy_data[st]; symbol = affy_data[ti]
ai = AffymetrixInformation(probeset, symbol, [], [], [], [], description, [], [], [])
ai.setSequence(affy_data[sq])
ai.setSpecies(affy_data[ss])
ai.setCoordinates(affy_data[al])
affy_annotation_db[probeset] = ai
return version
def getUIDAnnotationsFromGOElite(conventional_array_db,species_code,vendor,use_go):
import gene_associations; import time
start_time = time.time()
### Get Gene Ontology gene associations
try: gene_to_mapp_ens = gene_associations.importGeneMAPPData(species_code,'Ensembl-MAPP.txt') ### was just 'Ensembl'
except Exception: gene_to_mapp_ens = {}
try: gene_to_mapp_eg = gene_associations.importGeneMAPPData(species_code,'EntrezGene-MAPP.txt')
except Exception: gene_to_mapp_eg = {}
if vendor == 'Affymetrix': ### Remove exon associations, which decrease run efficiency and are superfluous
try: ens_to_array = gene_associations.getGeneToUidNoExon(species_code,'Ensembl-'+vendor); print 'Ensembl-'+vendor,'relationships imported'
except Exception: ens_to_array={}
try: eg_to_array = gene_associations.getGeneToUidNoExon(species_code,'EntrezGene-'+vendor); print 'EntrezGene-'+vendor,'relationships imported'
except Exception: eg_to_array={}
print '*',
else:
try: ens_to_array = gene_associations.getGeneToUid(species_code,'Ensembl-'+vendor)
except Exception: ens_to_array = {}
try: eg_to_array = gene_associations.getGeneToUid(species_code,'EntrezGene-'+vendor)
except Exception: eg_to_array = {}
print '*',
try: ens_annotations = gene_associations.importGeneData(species_code,'Ensembl')
except Exception: ens_annotations = {}
try: eg_annotations = gene_associations.importGeneData(species_code,'EntrezGene')
except Exception: eg_annotations = {}
if use_go == 'yes':
try: from import_scripts import OBO_import
except Exception: import OBO_import
go_annotations = OBO_import.importPreviousOntologyAnnotations('GeneOntology')
try: gene_to_go_ens = gene_associations.importGeneToOntologyData(species_code,'Ensembl','null','GeneOntology')
except Exception: gene_to_go_ens = {}
print '*',
try: gene_to_go_eg = gene_associations.importGeneToOntologyData(species_code,'EntrezGene','null','GeneOntology')
except Exception: gene_to_go_eg = {}
print '*',
component_db,process_db,function_db,selected_array_ens = annotateGOElitePathways('GO',go_annotations,gene_to_go_ens,ens_to_array)
print '*',
component_eg_db,process_eg_db,function_eg_db,selected_array_eg = annotateGOElitePathways('GO',go_annotations,gene_to_go_eg,eg_to_array)
print '*',
component_db = combineDBs(component_eg_db,component_db)
print '*',
process_db = combineDBs(process_eg_db,process_db)
print '*',
function_db = combineDBs(function_eg_db,function_db)
else:
selected_array_ens={}
selected_array_eg ={}
print '* * * * * *',
unique_arrayids={} ### Get all unique probesets
for gene in ens_to_array:
for uid in ens_to_array[gene]: unique_arrayids[uid]=[]
for gene in eg_to_array:
for uid in eg_to_array[gene]: unique_arrayids[uid]=[]
array_ens_mapp_db = annotateGOElitePathways('MAPP','',gene_to_mapp_ens,ens_to_array)
array_eg_mapp_db = annotateGOElitePathways('MAPP','',gene_to_mapp_eg,eg_to_array)
array_mapp_db = combineDBs(array_ens_mapp_db,array_eg_mapp_db)
print '*',
array_to_ens = swapKeyValues(ens_to_array)
array_to_eg = swapKeyValues(eg_to_array)
for uid in selected_array_ens:
gene = selected_array_ens[uid] ### Best candidate gene of several
array_to_ens[uid].remove(gene) ### Delete the first instance of this Ensembl
array_to_ens[uid].append(gene); array_to_ens[uid].reverse() ### Make the first ID
for uid in selected_array_eg:
gene = selected_array_eg[uid]
array_to_eg[uid].remove(gene) ### Delete the first instance of this EntrezGene ID
array_to_eg[uid].append(gene); array_to_eg[uid].reverse() ### Make the first ID
global array_symbols; global array_descriptions; array_symbols={}; array_descriptions={}
getArrayIDAnnotations(array_to_ens,ens_annotations,'Ensembl')
getArrayIDAnnotations(array_to_eg,eg_annotations,'Entrez')
print '*',
for arrayid in unique_arrayids:
try: component_names = component_db[arrayid]
except Exception: component_names=[]
try: process_names = process_db[arrayid]
except Exception: process_names=[]
try: function_names = function_db[arrayid]
except Exception: function_names=[]
try: wp_names = array_mapp_db[arrayid]
except Exception: wp_names=[]
try: ensembls = array_to_ens[arrayid]
except Exception: ensembls=[]
try: entrezs = array_to_eg[arrayid]
except Exception: entrezs=[]
try: symbol = array_symbols[arrayid]
except Exception: symbol=''
try: description = array_descriptions[arrayid]
except Exception: description=''
#if len(wp_names)>0:
#print arrayid, component_names, process_names, function_names, wp_names, ensembls, entrezs, symbol, description;kill
try:
ca = conventional_array_db[arrayid]
definition = ca.Description()
symbol = ca.Symbol()
ens = ca.Ensembl()
entrez = ca.Entrez()
pathways = ca.Pathways()
process, component, function = ca.GONameLists()
ensembls+=ens; entrezs+=entrez; wp_names+=pathways
component_names+=component; process_names+=process; function_names+=function
ensembls=unique.unique(ensembls); entrezs=unique.unique(entrezs); wp_names=unique.unique(wp_names)
component_names=unique.unique(component_names); process_names=unique.unique(process_names); function_names=unique.unique(function_names)
except Exception: null=[]
go_names = process_names,component_names,function_names
ai = AffymetrixInformation(arrayid,symbol,ensembls,entrezs,[],[],description,[],go_names,wp_names)
conventional_array_db[arrayid] = ai
#print len(conventional_array_db),'ArrayIDs with annotations.'
end_time = time.time(); time_diff = int(end_time-start_time)
print 'ArrayID annotations imported in',time_diff, 'seconds'
return conventional_array_db
class PathwayInformation:
def __init__(self,component_list,function_list,process_list,pathway_list):
self.component_list = component_list; self.function_list = function_list
self.process_list = process_list; self.pathway_list = pathway_list
def Component(self): return self.Format(self.component_list)
def Process(self): return self.Format(self.process_list)
def Function(self): return self.Format(self.function_list)
def Pathway(self): return self.Format(self.pathway_list)
def Combined(self): return self.Format(self.pathway_list+self.process_list+self.function_list+self.component_list)
def Format(self,terms):
return string.join(terms,' // ')
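### Illustrative example (hypothetical values): terms are joined with ' // '.
#   pi = PathwayInformation(['nucleus'],['DNA binding'],['transcription'],['p53 pathway:WP0000'])
#   pi.Combined() -> 'p53 pathway:WP0000 // transcription // DNA binding // nucleus'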
def getHousekeepingGenes(species_code):
vendor = 'Affymetrix'
exclude = ['ENSG00000256901'] ### Incorrect homology with housekeeping
import gene_associations
try: ens_to_array = gene_associations.getGeneToUidNoExon(species_code,'Ensembl-'+vendor); print 'Ensembl-'+vendor,'relationships imported'
except Exception: ens_to_array={}
housekeeping_genes={}
for gene in ens_to_array:
for uid in ens_to_array[gene]:
if 'AFFX' in uid:
if gene not in exclude: housekeeping_genes[gene]=[]
return housekeeping_genes
def getEnsemblAnnotationsFromGOElite(species_code):
import gene_associations; import time
start_time = time.time()
### Get Gene Ontology gene associations
try: gene_to_mapp_ens = gene_associations.importGeneMAPPData(species_code,'Ensembl-MAPP.txt')
except Exception: gene_to_mapp_ens = {}
try: from import_scripts import OBO_import
except Exception: import OBO_import
go_annotations = OBO_import.importPreviousOntologyAnnotations('GeneOntology')
try: gene_to_go_ens = gene_associations.importGeneToOntologyData(species_code,'Ensembl','null','GeneOntology')
except Exception: gene_to_go_ens = {}
component_db={}; process_db={}; function_db={}; all_genes={}
for gene in gene_to_go_ens:
all_genes[gene]=[]
for goid in gene_to_go_ens[gene]:
if goid in go_annotations:
s = go_annotations[goid]
go_name = string.replace(s.OntologyTerm(),'\\','')
gotype = s.OntologyType()
if gotype[0] == 'C' or gotype[0] == 'c':
try: component_db[gene].append(go_name)
except KeyError: component_db[gene] = [go_name]
elif gotype[0] == 'P' or gotype[0] == 'p' or gotype[0] == 'b':
try: process_db[gene].append(go_name)
except KeyError: process_db[gene] = [go_name]
elif gotype[0] == 'F' or gotype[0] == 'f' or gotype[0] == 'm':
try: function_db[gene].append(go_name)
except KeyError: function_db[gene] = [go_name]
for gene in gene_to_mapp_ens: all_genes[gene]=[]
for gene in all_genes:
component_go=[]; process_go=[]; function_go=[]; pathways=[]
if gene in component_db: component_go = component_db[gene]
if gene in function_db: function_go = function_db[gene]
if gene in process_db: process_go = process_db[gene]
if gene in gene_to_mapp_ens: pathways = gene_to_mapp_ens[gene]
pi=PathwayInformation(component_go,function_go,process_go,pathways)
all_genes[gene]=pi
end_time = time.time(); time_diff = int(end_time-start_time)
print len(all_genes),'Ensembl GO/pathway annotations imported in',time_diff, 'seconds'
return all_genes
def getArrayIDAnnotations(uid_to_gene,gene_annotations,gene_type):
for uid in uid_to_gene:
gene = uid_to_gene[uid][0]
if gene in gene_annotations:
s = gene_annotations[gene]
if len(s.Symbol()) > 0:
array_symbols[uid] = s.Symbol()
array_descriptions[uid] = s.Description()
def combineDBs(db1,db2):
for i in db1:
try: db1[i]+=db2[i]
except Exception: null=[]
for i in db2:
try: db2[i]+=db1[i]
except Exception: db1[i]=db2[i]
db1 = eliminate_redundant_dict_values(db1)
return db1
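### Illustrative example: values are merged key-wise and de-duplicated; keys
### unique to either input are retained.
#   combineDBs({'u1':['a']},{'u1':['b'],'u2':['c']}) -> {'u1':['a','b'],'u2':['c']}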
def annotateGOElitePathways(pathway_type,go_annotations,gene_to_pathway,gene_to_uid):
array_pathway_db={}; determine_best_geneID={}
for gene in gene_to_uid:
#if gene == 'ENSG00000233911': print gene_to_uid[gene],len(gene_to_pathway[gene]),'b'
try: pathways = gene_to_pathway[gene]
except Exception: pathways=[]
for arrayid in gene_to_uid[gene]:
#if arrayid == '208286_x_at': print 'a',[gene]
for pathway in pathways:
try:
if pathway_type == 'GO':
s = go_annotations[pathway]
go_name = string.replace(s.OntologyTerm(),'\\','')
try: array_pathway_db[arrayid,s.OntologyType()].append(go_name)
except Exception: array_pathway_db[arrayid,s.OntologyType()] = [go_name]
else:
try: array_pathway_db[arrayid].append(pathway + '(WikiPathways)')
except Exception: array_pathway_db[arrayid] = [pathway + '(WikiPathways)']
except Exception: null=[] ### if GOID not found in annotation database
if pathway_type == 'GO':
try: determine_best_geneID[arrayid].append([len(pathways),gene])
except Exception: determine_best_geneID[arrayid]=[[len(pathways),gene]]
array_pathway_db = eliminate_redundant_dict_values(array_pathway_db)
if pathway_type == 'GO':
### First, see which gene has the best GO annotations for an arrayID
selected_array_gene={}
for arrayid in determine_best_geneID:
if len(determine_best_geneID[arrayid])>1:
determine_best_geneID[arrayid].sort()
count,gene = determine_best_geneID[arrayid][-1] ### gene with the most GO annotations associated
### The below is code that appears to be necessary when non-chromosomal Ensembl genes with same name and annotation
### are present. When this happens, the lowest sorted Ensembl tends to be the real chromosomal instance
determine_best_geneID[arrayid].reverse()
#if arrayid == '208286_x_at': print determine_best_geneID[arrayid]
for (count2,gene2) in determine_best_geneID[arrayid]:
#if arrayid == '208286_x_at': print count2,gene2
if count == count2: gene = gene2
else: break
selected_array_gene[arrayid] = gene
component_db={}; process_db={}; function_db={}; determine_best_geneID=[]
for (arrayid,gotype) in array_pathway_db:
if string.lower(gotype[0]) == 'c':
component_db[arrayid] = array_pathway_db[(arrayid,gotype)]
elif string.lower(gotype[0]) == 'b' or string.lower(gotype[0]) == 'p':
process_db[arrayid] = array_pathway_db[(arrayid,gotype)]
if string.lower(gotype[0]) == 'm' or string.lower(gotype[0]) == 'f':
function_db[arrayid] = array_pathway_db[(arrayid,gotype)]
return component_db,process_db,function_db,selected_array_gene
else: return array_pathway_db
def extractPathwayData(terms,type,version):
goids = []; go_names = []
delimiter = ' /// '; small_delimiter = ' // ' ### ' /// ' separates entries; ' // ' separates fields within an entry
go_entries = string.split(terms,delimiter)
for go_entry in go_entries:
go_entry_info = string.split(go_entry,small_delimiter)
try:
if len(go_entry_info)>1:
if version == 1:
if type == 'GO': ### 6310 // DNA recombination // inferred from electronic annotation ///
goid, go_name, source = go_entry_info
while len(goid)< 7: goid = '0'+goid
goid = 'GO:'+goid
else: ### Calcium signaling pathway // KEGG ///
go_name, source = go_entry_info; goid = ''
if len(go_name)>1: go_name = source+'-'+go_name
else: go_name = ''
if version == 2:
if type == 'GO': ### NM_153254 // GO:0006464 // protein modification process // inferred from electronic annotation ///
try: accession, goid, go_name, source = go_entry_info
except ValueError: accession = go_entry_info[0]; goid = go_entry_info[1]; go_name = ''; source = ''
else: ### AF057061 // GenMAPP // Matrix_Metalloproteinases
accession, go_name, source = go_entry_info; goid = ''
if len(go_name)>1: go_name = source+'-'+go_name
else: go_name = ''
goids.append(goid); go_names.append(go_name)
except IndexError: pass ### malformed entry; keep what has been parsed so far
if extract_go_names != 'yes': go_names = [] ### Then don't store (save memory)
goids = unique.unique(goids); go_names = unique.unique(go_names)
return goids, go_names
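### Illustrative example (version 1, type 'GO'), matching the format noted above:
#   extractPathwayData('6310 // DNA recombination // inferred from electronic annotation','GO',1)
#   -> (['GO:0006310'], ['DNA recombination'])
### (go_names are emptied unless extract_go_names == 'yes')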
def exportResultsSummary(dir_list,species,type):
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite'
else: parent_dir = 'Databases'
if overwrite_previous == 'over-write previous':
if program_type != 'AltAnalyze':
try: from import_scripts import OBO_import
except Exception: import OBO_import
OBO_import.exportVersionData(0,'0/0/0','/'+species+'/nested/') ### Erase the existing file so that the database is re-made
else: parent_dir = 'NewDatabases'
new_file = parent_dir+'/'+species+'/'+type+'_files_summarized.txt'
today = str(datetime.date.today()); today = string.split(today,'-'); today = today[1]+'/'+today[2]+'/'+today[0]
fn=filepath(new_file); data = open(fn,'w')
for filename in dir_list: data.write(filename+'\t'+today+'\n')
try:
if parse_wikipathways == 'yes': data.write(wikipathways_file+'\t'+today+'\n')
except Exception: null=[]
data.close()
def exportMetaGeneData(species):
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite'
else: parent_dir = 'Databases'
if overwrite_previous == 'over-write previous':
if program_type != 'AltAnalyze':
null = None
#from import_scripts import OBO_import
#OBO_import.exportVersionData(0,'0/0/0','/'+species+'/nested/') ### Erase the existing file so that the database is re-remade
else: parent_dir = 'NewDatabases'
new_file = parent_dir+'/'+species+'/uid-gene/Ensembl_EntrezGene-meta.txt'
data = export.ExportFile(new_file)
for (primary,gene) in meta: data.write(primary+'\t'+gene+'\n')
data.close()
def importMetaGeneData(species):
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite'
else: parent_dir = 'Databases'
filename = parent_dir+'/'+species+'/uid-gene/Ensembl_EntrezGene-meta.txt'
fn=filepath(filename)
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
primary,gene = string.split(data,'\t')
meta[primary,gene]=[]
def exportRelationshipDBs(species):
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite'
else: parent_dir = 'Databases'
if overwrite_previous == 'over-write previous':
if program_type != 'AltAnalyze':
try: from import_scripts import OBO_import
except Exception: import OBO_import
OBO_import.exportVersionData(0,'0/0/0','/'+species+'/nested/') ### Erase the existing file so that the database is re-made
else: parent_dir = 'NewDatabases'
x1=0; x2=0; x3=0; x4=0; x5=0; x6=0
today = str(datetime.date.today()); today = string.split(today,'-'); today = today[1]+'/'+today[2]+'/'+today[0]
import UI; header = 'GeneID'+'\t'+'GOID'+'\n'
ens_annotations_found = verifyFile('Databases/'+species+'/gene/Ensembl.txt')
if process_go == 'yes': ens_process_go = 'yes'; eg_process_go = 'yes'
else:
ens_process_go = 'no'; eg_process_go = 'no'
eg_go_found = verifyFile('Databases/'+species+'/gene-go/EntrezGene-GeneOntology.txt')
ens_go_found = verifyFile('Databases/'+species+'/gene-go/Ensembl-GeneOntology.txt')
if eg_go_found == 'no': eg_process_go = 'yes'
if ens_go_found == 'no': ens_process_go = 'yes'
for probeset in affy_annotation_db:
ai = affy_annotation_db[probeset]
ensembls = unique.unique(ai.Ensembl()); entrezs = unique.unique(ai.Entrez())
for ensembl in ensembls:
if len(ensembl)>0:
if x1 == 0: ### prevents the file from being written if no data present
new_file1 = parent_dir+'/'+species+'/uid-gene/Ensembl-Affymetrix.txt'
data1 = export.ExportFile(new_file1); x1=1
data1.write(ensembl+'\t'+probeset+'\n')
if ens_process_go == 'yes':
goids = unique.unique(ai.GOIDs())
for goid_ls in goids:
for goid in goid_ls:
if len(goid)>0:
if x4==0:
new_file4 = parent_dir+'/'+species+'/gene-go/Ensembl-GeneOntology.txt'
data4 = export.ExportFile(new_file4); data4.write(header); x4=1
data4.write(ensembl+'\t'+goid+'\n')
for entrez in entrezs:
if len(entrez)>0:
if x2 == 0:
new_file2 = parent_dir+'/'+species+'/uid-gene/EntrezGene-Affymetrix.txt'
data2 = export.ExportFile(new_file2); x2=1
data2.write(entrez+'\t'+probeset+'\n')
if eg_process_go == 'yes':
goids = unique.unique(ai.GOIDs())
for goid_ls in goids:
for goid in goid_ls:
if len(goid)>0:
if x5==0:
new_file5 = parent_dir+'/'+species+'/gene-go/EntrezGene-GeneOntology.txt'
data5 = export.ExportFile(new_file5); data5.write(header); x5=1
data5.write(entrez+'\t'+goid+'\n')
for geneid in gene_annotation_db:
ea = gene_annotation_db[geneid]
if len(geneid)>0 and 'ENS:' not in geneid:
if x3 == 0:
new_file3 = parent_dir+'/'+species+'/gene/EntrezGene.txt'
data3 = export.ExportFile(new_file3); x3=1
data3.write('UID'+'\t'+'Symbol'+'\t'+'Name'+'\t'+'Species'+'\t'+'Date'+'\t'+'Remarks'+'\n')
data3.write(geneid+'\t'+ea.Symbol()+'\t'+ea.Description()+'\t'+species+'\t'+today+'\t'+''+'\n')
elif ens_annotations_found == 'no' and 'ENS:' in geneid:
geneid = string.replace(geneid,'ENS:','')
if x6 == 0:
new_file6 = parent_dir+'/'+species+'/gene/Ensembl.txt' ### inferred Ensembl annotations (kept separate from the EntrezGene table written above)
data6 = export.ExportFile(new_file6); x6=1
data6.write('UID'+'\t'+'Symbol'+'\t'+'Name'+'\n')
data6.write(geneid+'\t'+ea.Symbol()+'\t'+ea.Description()+'\n')
if x1==1: data1.close()
if x2==1: data2.close()
if x3==1: data3.close()
if x4==1: data4.close()
if x5==1: data5.close()
if x6==1: data6.close()
def swapKeyValues(db):
swapped={}
for key in db:
values = list(db[key]) ###If the value is not a list, make a list
for value in values:
try: swapped[value].append(key)
except KeyError: swapped[value] = [key]
swapped = eliminate_redundant_dict_values(swapped)
return swapped
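### Illustrative example: inverts a one-to-many dictionary.
#   swapKeyValues({'geneA':['u1','u2'],'geneB':['u1']})
#   -> {'u1':['geneA','geneB'],'u2':['geneA']}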
def integratePreviousAssociations():
print 'Integrating associations from previous databases...'
#print len(entrez_annotations),len(ens_to_uid), len(entrez_to_uid)
for gene in entrez_annotations:
if gene not in gene_annotation_db:
### Add previous gene information to the new database
y = entrez_annotations[gene]
z = InferredEntrezInformation(y.Symbol(),gene,y.Description())
gene_annotation_db[gene] = z
uid_to_ens = swapKeyValues(ens_to_uid); uid_to_entrez = swapKeyValues(entrez_to_uid)
###Add prior missing gene relationships for all probesets in the new database and that are missing
for uid in uid_to_ens:
if uid in affy_annotation_db:
y = affy_annotation_db[uid]
ensembl_ids = uid_to_ens[uid]
if y.Ensembl() == ['']: y.resetEnsembl(ensembl_ids)
else:
ensembl_ids = unique.unique(y.Ensembl()+ensembl_ids)
y.resetEnsembl(ensembl_ids)
else:
ensembl_ids = uid_to_ens[uid]; entrez_ids = []
if uid in uid_to_entrez: entrez_ids = uid_to_entrez[uid]
ai = AffymetrixInformation(uid, '', ensembl_ids, entrez_ids, [], [], '',[],[],[])
affy_annotation_db[uid] = ai
for uid in uid_to_entrez:
if uid in affy_annotation_db:
y = affy_annotation_db[uid]
entrez_ids = uid_to_entrez[uid]
if y.Entrez() == ['']: y.resetEntrez(entrez_ids)
else: y.resetEntrez(unique.unique(y.Entrez()+entrez_ids)) ### merge with previously stored EntrezGene IDs (mirrors the Ensembl handling above)
else:
entrez_ids = uid_to_entrez[uid]; ensembl_ids = []
if uid in uid_to_ens: ensembl_ids = uid_to_ens[uid]
ai = AffymetrixInformation(uid, '', ensembl_ids, entrez_ids, [], [], '',[],[],[])
affy_annotation_db[uid] = ai
def parseGene2GO(tax_id,species,overwrite_prev,rewrite_existing):
global overwrite_previous; overwrite_previous = overwrite_prev; status = 'run'
program_type,database_dir = unique.whatProgramIsThis()
#if program_type == 'AltAnalyze': database_dir = '/AltDatabase'
database_dir = '/BuildDBs'
import_dir = database_dir+'/Entrez/Gene2GO'
g = GrabFiles(); g.setdirectory(import_dir)
filename = g.searchdirectory('gene2go') ###Identify gene files corresponding to a particular MOD
gene_go=[] ### initialized up front so the length check below is safe when no gene2go file is found
if len(filename)>1:
fn=filepath(filename); x = 0
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x == 0: x = 1 ###skip the first line
else:
taxid=t[0];entrez=t[1];goid=t[2]
if taxid == tax_id: gene_go.append([entrez,goid])
else: status = 'not run'
if len(gene_go)>0:
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite'
else: parent_dir = 'Databases'
if overwrite_previous == 'over-write previous':
if program_type != 'AltAnalyze':
try: from import_scripts import OBO_import
except Exception: import OBO_import
OBO_import.exportVersionData(0,'0/0/0','/'+species+'/nested/') ### Erase the existing file so that the database is re-made
else: parent_dir = 'NewDatabases'
new_file = parent_dir+'/'+species+'/gene-go/EntrezGene-GeneOntology.txt'
today = str(datetime.date.today()); today = string.split(today,'-'); today = today[1]+'/'+today[2]+'/'+today[0]
from build_scripts import EnsemblSQL
headers = ['EntrezGene ID','GO ID']
EnsemblSQL.exportListstoFiles(gene_go,headers,new_file,rewrite_existing)
print 'Exported',len(gene_go),'gene-GO relationships for species:',species
exportResultsSummary(['Gene2GO.zip'],species,'EntrezGene_GO')
else: print 'No NCBI Gene Ontology support for this species'
return status ### 'run' or 'not run', depending on whether a gene2go file was parsed
def getMetaboliteIDTranslations(species_code):
mod = 'HMDB'; meta_metabolite_db={}
meta_metabolite_db = importMetaboliteIDs(species_code,mod,'CAS',meta_metabolite_db)
meta_metabolite_db = importMetaboliteIDs(species_code,mod,'ChEBI',meta_metabolite_db)
meta_metabolite_db = importMetaboliteIDs(species_code,mod,'PubChem',meta_metabolite_db)
return meta_metabolite_db
def importMetaboliteIDs(species_code,mod,source,meta_metabolite_db):
mod_source = mod+'-'+source
gene_to_source_id = gene_associations.getGeneToUid(species_code,('hide',mod_source))
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
#print mod_source, 'relationships imported'
meta_metabolite_db[source] = source_to_gene
return meta_metabolite_db
def importWikipathways(system_codes,incorporate_previous_associations,process_go,species_full,species,integrate_affy_associations,relationship_type,overwrite_affycsv):
global wikipathways_file; global overwrite_previous
overwrite_previous = overwrite_affycsv
database_dir = '/BuildDBs'
import_dir = database_dir+'/wikipathways'
g = GrabFiles(); g.setdirectory(import_dir); wikipathway_gene_db={}; eg_wikipathway_db={}; ens_wikipathway_db={}
search_term = relationship_type+'_data_'+species_full
#search_term = 'wikipathways'
filename = g.searchdirectory(search_term) ###Identify gene files corresponding to a particular MOD
#print "Parsing",filename; wikipathways_file = string.split(filename,'/')[-1]
#print "Extracting data for species:",species_full,species
if len(filename)>1:
fn=filepath(filename); gene_go={}; x = 0; last_line='' ### last_line tracked for the error report below
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
data = string.replace(data,'MGI:','')
t = string.split(data,'\t')
if x == 0:
x = 1; y = 0
while y < len(t):
if 'Ensembl' in t[y]: ens = y
if 'UniGene' in t[y]: ug = y
if 'Entrez' in t[y]: ll = y
if 'SwissProt' in t[y]: sp = y
if 'Uniprot' in t[y]: sp = y
if 'RefSeq' in t[y]: rt = y
if 'MOD' in t[y]: md= y
if 'Pathway Name' in t[y]: pn = y
if 'Organism' in t[y]: og= y
if 'Url to WikiPathways' in t[y]: ur= y
if 'PubChem' in t[y]: pc = y
if 'CAS' in t[y]: cs = y
if 'ChEBI' in t[y]: cb = y
y += 1
else:
try: ensembl = t[ens]; unigene = t[ug]; uniprot = t[sp]; refseq = t[rt]; mod = t[md]; entrez = t[ll]
except Exception: print '\nWARNING...errors were encountered when processing the line',[line]; print 'Errors in the WP file are present!!!!\n'; print [last_line]; sys.exit()
last_line = line
ensembl = splitEntry(ensembl); unigene = splitEntry(unigene); uniprot = splitEntry(uniprot)
pathway_name = t[pn]; organism = t[og]; wikipathways_url = t[ur]; entrez = splitEntry(entrez);
refseq = splitEntry(refseq); mod = splitOthers(mod); mod2 = []
try: pubchem = t[pc]; cas = t[cs]; chemEBI = t[cb]
except Exception:
pubchem=''; cas=''; chemEBI=''
if x==1: print 'WARNING!!! Metabolite Identifiers missing from WikiPathways file.'
x+=1
pubchem = splitEntry(pubchem); cas = splitEntry(cas); chemEBI = splitEntry(chemEBI)
htp,url,wpid = string.split(wikipathways_url,':')
pathway_name = pathway_name +':'+ wpid
for m in mod: mod2.append('mod:'+m); mod = mod2
#relationship_type
gene_ids = mod+ensembl+unigene+uniprot+refseq+entrez
if organism == species_full:
if relationship_type == 'mapped':
for id in pubchem:
try: wikipathway_gene_db[id].append(('PubChem',pathway_name))
except Exception: wikipathway_gene_db[id] = [('PubChem',pathway_name)]
for id in cas:
try: wikipathway_gene_db[id].append(('CAS',pathway_name))
except Exception: wikipathway_gene_db[id] = [('CAS',pathway_name)]
for id in chemEBI:
try: wikipathway_gene_db[id].append(('ChEBI',pathway_name))
except Exception: wikipathway_gene_db[id] = [('ChEBI',pathway_name)]
else:
for gene_id in gene_ids:
if len(gene_id)>1:
try: wikipathway_gene_db[gene_id].append(pathway_name)
except KeyError: wikipathway_gene_db[gene_id] = [pathway_name]
for gene_id in ensembl:
if len(gene_id)>1:
try: ens_wikipathway_db[pathway_name].append(gene_id)
except KeyError: ens_wikipathway_db[pathway_name] = [gene_id]
for gene_id in entrez:
if len(gene_id)>1:
try: eg_wikipathway_db[pathway_name].append(gene_id)
except KeyError: eg_wikipathway_db[pathway_name] = [gene_id]
if incorporate_previous_associations == 'yes':
if relationship_type == 'mapped':
try: gene_to_mapp = gene_associations.importGeneMAPPData(species,'HMDB-MAPP.txt')
except Exception: gene_to_mapp = {}
for id in gene_to_mapp:
for pathway_name in gene_to_mapp[id]:
try: wikipathway_gene_db[id].append(('HMDB',pathway_name))
except Exception: wikipathway_gene_db[id] = [('HMDB',pathway_name)]
else:
try: gene_to_mapp = gene_associations.importGeneMAPPData(species,'EntrezGene-MAPP.txt')
except Exception: gene_to_mapp = {}
for id in gene_to_mapp:
for pathway_name in gene_to_mapp[id]:
try: wikipathway_gene_db[id].append(pathway_name)
except Exception: wikipathway_gene_db[id] = [pathway_name]
try: gene_to_mapp = gene_associations.importGeneMAPPData(species,'Ensembl-MAPP.txt')
except Exception: gene_to_mapp = {}
for id in gene_to_mapp:
for pathway_name in gene_to_mapp[id]:
try: wikipathway_gene_db[id].append(pathway_name)
except Exception: wikipathway_gene_db[id] = [pathway_name]
if relationship_type == 'mapped':
hmdb_wikipathway_db={}
try:
meta_metabolite_db = getMetaboliteIDTranslations(species)
for id in wikipathway_gene_db:
for (system,pathway) in wikipathway_gene_db[id]:
if system == 'HMDB':
try: hmdb_wikipathway_db[pathway].append(id)
except KeyError: hmdb_wikipathway_db[pathway] = [id]
else:
id_to_mod = meta_metabolite_db[system]
if len(id)>0 and id in id_to_mod:
mod_ids = id_to_mod[id]
for mod_id in mod_ids:
try: hmdb_wikipathway_db[pathway].append(mod_id)
except KeyError: hmdb_wikipathway_db[pathway] = [mod_id]
hmdb_wikipathway_db = eliminate_redundant_dict_values(hmdb_wikipathway_db)
ad = system_codes['HMDB']
try: system_code = ad.SystemCode()
except Exception: system_code = ad
if len(hmdb_wikipathway_db)>0: exportGeneToMAPPs(species,'HMDB',system_code,hmdb_wikipathway_db)
except ValueError: null=[] ### Occurs with older versions of GO-Elite
else:
#print "Number of unique gene IDs linked to Wikipathways for species:",len(wikipathway_gene_db)
parse_wikipathways = 'yes'
buildAffymetrixCSVAnnotations(species,incorporate_previous_associations,process_go,parse_wikipathways,integrate_affy_associations,overwrite_affycsv)
grabEliteDbaseMeta(species) ### Similar to buildAffymetrixCSVAnnotations; grabs many-to-one gene associations for various systems
"""global affy_annotation_db; affy_annotation_db={}; global gene_annotation_db; gene_annotation_db = {}
global ens_to_uid; global entrez_to_uid; global entrez_annotations; global parse_wikipathways
parse_wikipathways = 'yes'
import_dir = '/BuildDBs/Affymetrix/'+species
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for affy_data in dir_list: #loop through each file in the directory to output results
affy_data_dir = 'BuildDBs/Affymetrix/'+species+'/'+affy_data
parse_affymetrix_annotations(affy_data_dir,species)"""
#if len(affy_annotation_db)>0 or len(meta)>0:
try:
### This is somewhat redundant with the earlier handling of meta; in addition, gene-gene relationships are now included
entrez_relationships={}; ens_relationships={}; meta2={}
for (primary,gene) in meta:
try:
null = int(primary)
if 'ENS' in gene:
try: entrez_relationships[primary].append(gene)
except Exception: entrez_relationships[primary] = [gene]
try: ens_relationships[gene].append(primary)
except Exception: ens_relationships[gene] = [primary]
except Exception:
try:
null = int(gene)
if 'ENS' in primary:
try: entrez_relationships[gene].append(primary)
except Exception: entrez_relationships[gene] = [primary]
try: ens_relationships[primary].append(gene)
except Exception: ens_relationships[primary] = [gene]
except Exception: null=[]
ens_relationships=eliminate_redundant_dict_values(ens_relationships)
entrez_relationships=eliminate_redundant_dict_values(entrez_relationships)
for id1 in entrez_relationships:
if len(entrez_relationships[id1])<3:
for id2 in entrez_relationships[id1]: meta2[id2,id1] = []
for id1 in ens_relationships:
if len(ens_relationships[id1])<3:
for id2 in ens_relationships[id1]: meta2[id2,id1] = []
#print len(meta), "gene relationships imported"
#print len(wikipathway_gene_db), "gene IDs extracted from Wikipathway pathways"
"""for (primary,gene_id) in meta:
if gene_id == 'NP_598767': print wikipathway_gene_db[gene_id];kill"""
### Since relationships are inferred in new versions of the WikiPathways tables, we don't require meta (no longer true - reverted to old method)
for (primary,gene_id) in meta2:
try:
pathway_names = wikipathway_gene_db[gene_id]
for pathway in pathway_names:
#if pathway == 'Mitochondrial tRNA Synthetases:WP62': print [gene_id], primary
if 'ENS' in primary:
try: ens_wikipathway_db[pathway].append(primary)
except KeyError: ens_wikipathway_db[pathway] = [primary]
if primary in ens_eg_db:
for entrez in ens_eg_db[primary]:
try: eg_wikipathway_db[pathway].append(entrez)
except KeyError: eg_wikipathway_db[pathway] = [entrez]
else:
try:
check = int(primary) ### Ensure this is numeric - thus EntrezGene
try: eg_wikipathway_db[pathway].append(primary)
except KeyError: eg_wikipathway_db[pathway] = [primary]
if primary in ens_eg_db:
for ens in ens_eg_db[primary]:
try: ens_wikipathway_db[pathway].append(ens)
except KeyError: ens_wikipathway_db[pathway] = [ens]
except Exception:
### Occurs for Ensembl for species like Yeast which don't have "ENS" in the ID
try: ens_wikipathway_db[pathway].append(primary)
except KeyError: ens_wikipathway_db[pathway] = [primary]
if primary in ens_eg_db:
for entrez in ens_eg_db[primary]:
try: eg_wikipathway_db[pathway].append(entrez)
except KeyError: eg_wikipathway_db[pathway] = [entrez]
except KeyError: null=[]
"""
for pathway in eg_wikipathway_db:
if 'ytoplasmic' in pathway: print pathway, len(eg_wikipathway_db[pathway]),len(ens_wikipathway_db[pathway]),len(ens_eg_db);sys.exit()
"""
#print len(eg_wikipathway_db), len(ens_wikipathway_db)
#print system_codes
ad = system_codes['EntrezGene']
try: system_code = ad.SystemCode()
except Exception: system_code = ad
if len(eg_wikipathway_db)>0:
exportGeneToMAPPs(species,'EntrezGene',system_code,eg_wikipathway_db)
ad = system_codes['Ensembl']
try: system_code = ad.SystemCode()
except Exception: system_code = ad
if len(ens_wikipathway_db)>0:
exportGeneToMAPPs(species,'Ensembl',system_code,ens_wikipathway_db)
return len(meta)
except ValueError: return 'ValueError'
else: return 'not run'
def addToMeta(db):
for i in db:
for k in db[i]: meta[i,k]=[]
def grabEliteDbaseMeta(species_code):
import gene_associations
db = gene_associations.getRelated(species_code,'Ensembl-'+'Uniprot'); addToMeta(db)
db = gene_associations.getRelated(species_code,'Ensembl-'+'RefSeq'); addToMeta(db)
db = gene_associations.getRelated(species_code,'Ensembl-'+'UniGene'); addToMeta(db)
db = gene_associations.getRelated(species_code,'Ensembl-'+'EntrezGene'); addToMeta(db)
def splitEntry(str_value):
str_list = string.split(str_value,',')
return str_list
def splitOthers(str_value):
str_list = string.split(str_value,',')
str_list2=[]
for i in str_list:
i = string.split(i,'(')[0]
str_list2.append(i)
return str_list2
def exportGeneToMAPPs(species,system_name,system_code,wikipathway_db):
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': parent_dir = 'AltDatabase/goelite'
else: parent_dir = 'Databases'
if overwrite_previous == 'over-write previous':
if program_type != 'AltAnalyze':
#from import_scripts import OBO_import; OBO_import.exportVersionData(0,'0/0/0','/'+species+'/nested/') ### Erase the existing file so that the database is re-remade
null = None
else: parent_dir = 'NewDatabases'
new_file = parent_dir+'/'+species+'/gene-mapp/'+system_name+'-MAPP.txt'
#print "Exporting",new_file
today = str(datetime.date.today()); today = string.split(today,'-'); today = today[1]+'/'+today[2]+'/'+today[0]
y=0
data1 = export.ExportFile(new_file)
data1.write('UID'+'\t'+'SystemCode'+'\t'+'MAPP'+'\n')
#print len(wikipathway_db)
for pathway in wikipathway_db:
gene_ids = unique.unique(wikipathway_db[pathway])
#print gene_ids;kill
for gene_id in gene_ids: data1.write(gene_id+'\t'+system_code+'\t'+pathway+'\n'); y+=1
data1.close()
#print 'Exported',y,'gene-MAPP relationships for species:',species, 'for',len(wikipathway_db),'pathways.'
def extractAndIntegrateAffyData(species,integrate_affy_associations,Parse_wikipathways):
#print integrate_affy_associations
global affy_annotation_db; affy_annotation_db={}; global gene_annotation_db; gene_annotation_db = {}
global parse_wikipathways; global meta; meta = {}; global ens_eg_db; ens_eg_db={}
parse_wikipathways = Parse_wikipathways
program_type,database_dir = unique.whatProgramIsThis()
if program_type == 'AltAnalyze': database_dir = '/AltDatabase/affymetrix'
else: database_dir = '/BuildDBs/Affymetrix'
import_dir = database_dir+'/'+species
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for affy_data in dir_list: #loop through each file in the directory to output results
affy_data_dir = database_dir[1:]+'/'+species+'/'+affy_data
if '.csv' in affy_data_dir:
if '.zip' in affy_data_dir:
### unzip the file
print "Extracting the zip file:",filepath(affy_data_dir)
update.unzipFiles(affy_data,filepath(database_dir[1:]+'/'+species+'/'))
try:
print "Removing the zip file:",filepath(affy_data_dir)
os.remove(filepath(affy_data_dir)); status = 'removed'
except Exception: null=[] ### Not sure why this error occurs since the file is not open
affy_data_dir = string.replace(affy_data_dir,'.zip','')
parse_affymetrix_annotations(affy_data_dir,species)
if len(affy_annotation_db)>0:
print 'Affymetrix CSV annotations imported..'
if integrate_affy_associations == 'yes':
exportAffymetrixCSVAnnotations(species,dir_list)
if parse_wikipathways == 'yes':
if len(meta) > 0:
try: exportMetaGeneData(species)
except Exception: null=[]
def exportAffymetrixCSVAnnotations(species,dir_list):
import gene_associations; global entrez_annotations
global ens_to_uid; global entrez_to_uid
if incorporate_previous_associations == 'yes':
###dictionary gene to unique array ID
mod_source1 = 'Ensembl'+'-'+'Affymetrix'; mod_source2 = 'EntrezGene'+'-'+'Affymetrix'
try: ens_to_uid = gene_associations.getGeneToUid(species,mod_source1)
except Exception: ens_to_uid = {}
try: entrez_to_uid = gene_associations.getGeneToUid(species,mod_source2)
except Exception: entrez_to_uid = {}
### Gene IDs with annotation information
try: entrez_annotations = gene_associations.importGeneData(species,'EntrezGene')
except Exception: entrez_annotations = {}
### If we wish to combine old and new GO relationships - Unclear if this is a good idea
"""if process_go == 'yes':
entrez_to_go = gene_associations.importGeneGOData(species,'EntrezGene','null')
ens_to_go = gene_associations.importGeneGOData(species,'Ensembl','null')"""
integratePreviousAssociations()
if len(gene_annotation_db)>1:
exportRelationshipDBs(species)
exportResultsSummary(dir_list,species,'Affymetrix')
def importAffymetrixAnnotations(dir,Species,Process_go,Extract_go_names,Extract_pathway_names):
global species; global process_go; global extract_go_names; global extract_pathway_names
global affy_annotation_db; affy_annotation_db={}; global parse_wikipathways
parse_wikipathways = 'no'
species = Species; process_go = Process_go; extract_go_names = Extract_go_names; extract_pathway_names = Extract_pathway_names
import_dir = '/'+dir+'/'+species
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
    version = None ### default when no CSV annotation files are present
    print 'Parsing Affymetrix Annotation files...',
for affy_data in dir_list: #loop through each file in the directory to output results
affy_data_dir = dir+'/'+species+'/'+affy_data
if '.csv' in affy_data_dir: version = parse_affymetrix_annotations(affy_data_dir,species)
print 'done'
return affy_annotation_db, version
def buildAffymetrixCSVAnnotations(Species,Incorporate_previous_associations,Process_go,parse_wikipathways,integrate_affy_associations,overwrite_affycsv):
global incorporate_previous_associations; global process_go; global species; global extract_go_names
global wikipathways_file; global overwrite_previous; overwrite_previous = overwrite_affycsv
global extract_pathway_names; extract_go_names = 'no'; extract_pathway_names = 'no'
process_go = Process_go; incorporate_previous_associations = Incorporate_previous_associations; species = Species
extractAndIntegrateAffyData(species,integrate_affy_associations,parse_wikipathways)
def importSystemInfo():
import UI
filename = 'Config/source_data.txt'; x=0
fn=filepath(filename); global system_list; system_list=[]; global system_codes; system_codes={}; mod_list=[]
for line in open(fn,'rU').readlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
sysname=t[0];syscode=t[1]
try: mod = t[2]
        except IndexError: mod = '' ### a missing column raises IndexError
if x==0: x=1
else:
system_list.append(sysname)
ad = UI.SystemData(syscode,sysname,mod)
if len(mod)>1: mod_list.append(sysname)
system_codes[sysname] = ad
return system_codes
def TimeStamp():
import time
time_stamp = time.localtime()
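    ### zero-pad month/day to build a YYYYMMDD stamp (equivalent to time.strftime('%Y%m%d'))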
year = str(time_stamp[0]); month = str(time_stamp[1]); day = str(time_stamp[2])
if len(month)<2: month = '0'+month
if len(day)<2: day = '0'+day
return year+month+day
if __name__ == '__main__':
getEnsemblAnnotationsFromGOElite('Hs');sys.exit()
    Species_full = 'Rattus norvegicus'; Species_code = 'Rn'; tax_id = '10116'; overwrite_affycsv = 'yes' ### 10116 is the NCBI taxonomy ID for rat (10090 is mouse)
System_codes = importSystemInfo(); process_go = 'yes'; incorporate_previous_associations = 'yes'
import update; overwrite = 'over-write previous'
import time; start_time = time.time()
getUIDAnnotationsFromGOElite({},'Hs','Agilent','yes')
end_time = time.time(); time_diff = int(end_time-start_time); print time_diff, 'seconds'; kill
#buildAffymetrixCSVAnnotations(Species_code,incorporate_previous_associations,process_go,'no',integrate_affy_associations,overwrite);kill
species_code = Species_code; parseGene2GO(tax_id,species_code,overwrite,'no');kill
date = TimeStamp(); file_type = ('wikipathways_'+date+'.tab','.txt')
fln,status = update.download('http://www.wikipathways.org/wpi/pathway_content_flatfile.php?output=tab','BuildDBs/wikipathways/',file_type)
status = ''
if 'Internet' not in status:
importWikipathways(System_codes,incorporate_previous_associations,process_go,Species_full,Species_code,'no',overwrite)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/BuildAffymetrixAssociations.py
|
BuildAffymetrixAssociations.py
|
import sys,string,os,math
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
def getFiles(sub_dir):
dir_list = os.listdir(sub_dir); dir_list2 = []
for entry in dir_list:
if '.csv' in entry: dir_list2.append(entry)
return dir_list2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineCSVFiles(root_dir):
files = getFiles(root_dir)
import export
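    ### 'blah.txt' is a placeholder filename; export.getParentDir only needs the parent folder name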
folder = export.getParentDir(root_dir+'/blah.txt')
first_file = True
genes=[]
cells=[]
data_matrices=[]
for file in files:
cells.append(file[:-4])
matrix=[]
for line in open(root_dir+'/'+file,'rU').xreadlines():
data = cleanUpLine(line)
gene,count = string.split(data,',')
if first_file:
genes.append(gene)
matrix.append(float(count))
data_matrices.append(matrix)
first_file=False
data_matrices = zip(*data_matrices)
export_object = open(root_dir+'/'+folder+'-counts.txt','w')
headers = string.join(['UID']+cells,'\t')+'\n'
export_object.write(headers)
index=0
for geneID in genes:
values = string.join([geneID]+map(str,list(data_matrices[index])),'\t')+'\n'
export_object.write(values)
index+=1
export_object.close()
data_matrices = zip(*data_matrices)
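    ### Transpose back to cells x genes, then apply CPTT normalization per cell: each
    ### barcode's counts are scaled to counts-per-ten-thousand and log2 transformed with
    ### a pseudocount of 1, i.e. log2((10000*count/total)+1).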
index=0
for cell in cells:
matrix = data_matrices[index]
barcode_sum = sum(matrix)
data_matrices[index] = map(lambda val: math.log((10000.00*val/barcode_sum)+1.0,2), matrix)
index+=1
data_matrices = zip(*data_matrices)
export_object = open(root_dir+'/'+folder+'-CPTT.txt','w')
headers = string.join(['UID']+cells,'\t')+'\n'
export_object.write(headers)
index=0
for geneID in genes:
values = string.join([geneID]+map(str,list(data_matrices[index])),'\t')+'\n'
export_object.write(values)
index+=1
export_object.close()
if __name__ == '__main__':
    ################ Command-line arguments ################
import getopt
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','GeneType=', 'IDType='])
for opt, arg in options:
if opt == '--i': input_dir=arg
combineCSVFiles(input_dir)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/combineCSV.py
|
combineCSV.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" Batch script for extracting many junction.bed and building exon.bed files from
an input set of BAM files in a directory. Requires a reference text file containing
exon regions (currently provided from AltAnalyze - see ReferenceExonCoordinates
folder). Can produce only junction.bed files, only a combined exon reference or only
exon.bed files optionally. Can run using a single processor or multiple simultaneous
processes (--m flag)."""
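### Example invocation (a sketch; the paths below are hypothetical):
### python multiBAMtoBED.py --i /data/BAMfiles --g /data/Hs_Ensembl_exon.txt --r /data/Hs_exon.bed --a all --m yes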
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import time
import shutil
import unique
import subprocess
from import_scripts import BAMtoJunctionBED
from import_scripts import BAMtoExonBED
import getopt
import traceback
################# General data import methods #################
def filepath(filename):
fn = unique.filepath(filename)
return fn
def cleanUpLine(line):
data = string.replace(line,'\n','')
data = string.replace(data,'\c','')
data = string.replace(data,'\r','')
data = string.replace(data,'"','')
return data
def getFolders(sub_dir):
dir_list = unique.read_directory(sub_dir); dir_list2 = []
###Only get folder names
for entry in dir_list:
if '.' not in entry: dir_list2.append(entry)
return dir_list2
def getFiles(sub_dir):
    dir_list = unique.read_directory(sub_dir); dir_list2 = []
    ###Only get file names
    for entry in dir_list:
        if '.' in entry: dir_list2.append(entry)
    return dir_list2
def parallelBAMProcessing(directory,refExonCoordinateFile,bed_reference_dir,analysisType=[],useMultiProcessing=False,MLP=None,root=None):
paths_to_run=[]
errors=[]
if '.bam' in directory:
### Allow a single BAM file to be specifically analyzed (e.g., bsub operation)
bam_file = directory
bam_file = string.replace(directory,'\\','/')
directory = string.join(string.split(directory,'/')[:-1],'/')
else:
bam_file = None
outputExonCoordinateRefBEDfile = str(bed_reference_dir)
bed_reference_dir = string.replace(bed_reference_dir,'\\','/')
### Check if the BAM files are located in the target folder (not in subdirectories)
files = getFiles(directory)
for file in files:
if '.bam' in file and '.bai' not in file:
source_file = directory+'/'+file
source_file = filepath(source_file)
output_filename = string.replace(file,'.bam','')
output_filename = string.replace(output_filename,'=','_')
destination_file = directory+'/'+output_filename+'__exon.bed'
destination_file = filepath(destination_file)
paths_to_run.append((source_file,refExonCoordinateFile,bed_reference_dir,destination_file))
### Otherwise, check subdirectories for BAM files
folders = getFolders(directory)
if len(paths_to_run)==0:
for top_level in folders:
try:
files = getFiles(directory+'/'+top_level)
for file in files:
if '.bam' in file and '.bai' not in file:
                        source_file = directory+'/'+top_level+'/'+file ### include the subdirectory in the path
source_file = filepath(source_file)
destination_file = directory+'/'+top_level+'__exon.bed'
destination_file = filepath(destination_file)
paths_to_run.append((source_file,refExonCoordinateFile,bed_reference_dir,destination_file))
except Exception: pass
### If a single BAM file is indicated
if bam_file != None:
output_filename = string.replace(bam_file,'.bam','')
output_filename = string.replace(output_filename,'=','_')
destination_file = output_filename+'__exon.bed'
paths_to_run = [(bam_file,refExonCoordinateFile,bed_reference_dir,destination_file)]
if 'reference' in analysisType and len(analysisType)==1:
augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
sys.exit()
if useMultiProcessing:
pool_size = MLP.cpu_count()
if len(paths_to_run)<pool_size:
pool_size = len(paths_to_run)
print 'Using %d processes' % pool_size
if len(paths_to_run) > pool_size:
pool_size = len(paths_to_run)
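        ### Note: the check above expands the pool to one worker per BAM file when there
        ### are more files than CPU cores, so the printed count can understate the pool size.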
if len(analysisType) == 0 or 'junction' in analysisType:
print 'Extracting junction alignments from BAM files...',
pool = MLP.Pool(processes=pool_size)
try: results = pool.map(runBAMtoJunctionBED, paths_to_run) ### worker jobs initiated in tandem
except ValueError:
                print_out = '\nWARNING!!! No index found for the BAM files (.bam.bai). Sort and index using Samtools prior to loading in AltAnalyze'
print traceback.format_exc()
if root!=None:
import UI
UI.WarningWindow(print_out,'Exit');sys.exit()
try:pool.close(); pool.join(); pool = None
except Exception: pass
print_out=None
for sample,missing in results:
if len(missing)>1:
print_out = '\nWarning!!! %s chromosomes not found in: %s (PySam platform-specific error)' % (string.join(missing,', '),sample)
if root!=None and print_out!=None:
try:
import UI
UI.WarningWindow(print_out,'Continue')
except Exception: pass
print len(paths_to_run), 'BAM files','processed'
if len(analysisType) == 0 or 'reference' in analysisType:
#print 'Building exon reference coordinates from Ensembl/UCSC and all junctions...',
augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
#print 'completed'
        if len(analysisType) == 0 or 'exon' in analysisType:
            print 'Extracting exon alignments from BAM files...',
pool = MLP.Pool(processes=pool_size)
results = pool.map(runBAMtoExonBED, paths_to_run) ### worker jobs initiated in tandem
try:pool.close(); pool.join(); pool = None
except Exception: pass
print len(paths_to_run), 'BAM files','processed'
else:
if len(analysisType) == 0 or 'junction' in analysisType:
for i in paths_to_run:
runBAMtoJunctionBED(i)
if len(analysisType) == 0 or 'reference' in analysisType:
augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile)
if len(analysisType) == 0 or 'exon' in analysisType:
for i in paths_to_run:
runBAMtoExonBED(i)
def runBAMtoJunctionBED(paths_to_run):
bamfile_dir,refExonCoordinateFile,bed_reference_dir,output_bedfile_path = paths_to_run
output_bedfile_path = string.replace(bamfile_dir,'.bam','__junction.bed')
#if os.path.exists(output_bedfile_path) == False: ### Only run if the file doesn't exist
results = BAMtoJunctionBED.parseJunctionEntries(bamfile_dir,multi=True,ReferenceDir=refExonCoordinateFile)
#else: print output_bedfile_path, 'already exists.'
return results
def runBAMtoExonBED(paths_to_run):
bamfile_dir,refExonCoordinateFile,bed_reference_dir,output_bedfile_path = paths_to_run
if os.path.exists(output_bedfile_path) == False: ### Only run if the file doesn't exist
BAMtoExonBED.parseExonReferences(bamfile_dir,bed_reference_dir,multi=True,intronRetentionOnly=False)
else:
print output_bedfile_path, 'already exists... re-writing'
BAMtoExonBED.parseExonReferences(bamfile_dir,bed_reference_dir,multi=True,intronRetentionOnly=False)
def getChrFormat(directory):
### Determine if the chromosomes have 'chr' or nothing
files = getFiles(directory)
chr_status=True
for file in files:
firstLine=True
if 'junction' in file and '.bed' in file:
for line in open(directory+'/'+file,'rU').xreadlines():
if firstLine: firstLine=False
else:
t = string.split(line)
chr = t[0]
if 'chr' not in chr:
chr_status = False
break
break
return chr_status
def augmentExonReferences(directory,refExonCoordinateFile,outputExonCoordinateRefBEDfile):
print 'Building reference bed file from all junction.bed files'
splicesite_db={} ### reference splice-site database (we only want to add novel splice-sites to our reference)
real_splicesites={}
introns={}
novel_db={}
reference_toplevel = string.join(string.split(outputExonCoordinateRefBEDfile,'/')[:-1],'/')
try: os.mkdir(reference_toplevel) ### If the bed folder doesn't exist
except Exception: pass
chr_status = getChrFormat(directory)
o = open (outputExonCoordinateRefBEDfile,"w")
#refExonCoordinateFile = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Mm/Mm_Ensembl_exon.txt'
reference_rows=0
if '.gtf' in refExonCoordinateFile: firstLine = False
else: firstLine = True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
reference_rows+=1
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
if chr_status == False:
chr = string.replace(chr,'chr','')
o.write(string.join([chr,start,stop,geneID+':'+exon,'',strand],'\t')+'\n')
start = int(start); stop = int(stop)
#geneID = string.split(exon,':')[0]
splicesite_db[chr,start]=geneID
splicesite_db[chr,stop]=geneID
if 'I' in exon:
try: introns[geneID].append([start,stop])
except Exception: introns[geneID] = [[start,stop]]
files = getFiles(directory)
for file in files:
firstLine=True
if 'junction' in file and '.bed' in file:
for line in open(directory+'/'+file,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'12', '6998470', '6998522', 'ENSG00000111671:E1.1_ENSE00001754003', '0', '-'
chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
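                    ### blockSizes from the BED12 junction line give the two aligned segment
                    ### lengths: the donor site is exon1_start+exon1_len and the acceptor is
                    ### exon2_stop-exon2_len+1 (swapped in the minus-strand branch below).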
if strand == '-':
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
### Exons have the opposite order
a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
key = chr,exon1_stop,exon2_start
if (chr,exon1_stop) not in splicesite_db: ### record the splice site and position of the max read
if (chr,exon2_start) in splicesite_db: ### only include splice sites where one site is known
geneID = splicesite_db[(chr,exon2_start)]
novel_db[chr,exon1_stop,strand] = exon1_start,geneID,5
real_splicesites[chr,exon2_start]=None
elif (chr,exon2_start) not in splicesite_db: ### record the splice site and position of the max read
if (chr,exon1_stop) in splicesite_db: ### only include splice sites where one site is known
#if 121652702 ==exon2_start:
#print chr, exon1_start,exon1_stop,exon2_start,exon2_stop, strand;sys.exit()
geneID = splicesite_db[(chr,exon1_stop)]
novel_db[chr,exon2_start,strand] = exon2_stop,geneID,3
real_splicesites[chr,exon1_stop]=None
else:
real_splicesites[chr,exon1_stop]=None
real_splicesites[chr,exon2_start]=None
print len(novel_db), 'novel splice sites and', len(real_splicesites), 'known splice sites.'
gene_organized={}
for (chr,pos1,strand) in novel_db:
pos2,geneID,type = novel_db[(chr,pos1,strand)]
try: gene_organized[chr,geneID,strand].append([pos1,pos2,type])
except Exception: gene_organized[chr,geneID,strand] = [[pos1,pos2,type]]
def intronCheck(geneID,coords):
### see if the coordinates are within a given intron
try:
for ic in introns[geneID]:
if withinQuery(ic,coords):
return True
except Exception:
pass
def withinQuery(ls1,ls2):
imax = max(ls1)
imin = min(ls1)
qmax = max(ls2)
qmin = min(ls2)
if qmin >= imin and qmax <= imax:
return True
else:
return False
### Compare the novel splice site locations in each gene
added=[]
for (chr,geneID,strand) in gene_organized:
gene_organized[(chr,geneID,strand)].sort()
if strand == '-':
gene_organized[(chr,geneID,strand)].reverse()
i=0
set = gene_organized[(chr,geneID,strand)]
for (pos1,pos2,type) in set:
k = [pos1,pos2]
annotation='novel'
if i==0 and type == 3:
if len(set)>1:
if set[i+1][-1]==5:
l = [set[i+1][0],pos1]
if (max(l)-min(l))<300 and intronCheck(geneID,l):
k=l
#print chr,k
annotation='novel-paired'
elif type == 5:
if set[i-1][-1]==3:
l = [set[i-1][0],pos1]
if (max(l)-min(l))<300 and intronCheck(geneID,l):
k=l
#print chr,k
annotation='novel-paired'
k.sort(); i+=1
if k not in added:
values = string.join([chr,str(k[0]),str(k[1]),geneID+':'+annotation,'',strand],'\t')+'\n'
added.append(k)
o.write(values)
o.close()
if __name__ == '__main__':
import multiprocessing as mlp
refExonCoordinateFile = ''
outputExonCoordinateRefBEDfile = ''
#bam_dir = "H9.102.2.6.bam"
#outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed'
    ################ Command-line arguments ################
    if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Please designate a directory containing BAM files as input in the command-line"
print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --g /Users/me/ReferenceExonCoordinates/Hs_Ensembl_exon_hg19.txt --r /Users/me/ExonBEDRef/Hs_Ensembl_exon-cancer_hg19.bed --a exon --a junction --a reference"
print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --a junction"
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','r=','a=','m='])
for opt, arg in options:
if opt == '--i': bam_dir=arg
elif opt == '--g': refExonCoordinateFile=arg
elif opt == '--r': outputExonCoordinateRefBEDfile=arg
elif opt == '--a': analysisType.append(arg) ### options are: all, junction, exon, reference
elif opt == '--m': ### Run each BAM file on a different processor
if arg == 'yes': useMultiProcessing=True
elif arg == 'True': useMultiProcessing=True
else: useMultiProcessing=False
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if len(analysisType) == 0 or 'all' in analysisType:
analysisType = ['exon','junction','reference']
        ### Fall back to junction-only analysis when no reference files were supplied
        ### (both variables are pre-initialized to '', so the original NameError check could never fire)
        if refExonCoordinateFile == '' or outputExonCoordinateRefBEDfile == '':
            print 'Please provide an exon coordinate text file using the option --g and an output coordinate file path (--r) to generate exon.bed files'
            analysisType = ['junction']
            refExonCoordinateFile = ''
            outputExonCoordinateRefBEDfile = ''
try: bam_dir = bam_dir
except Exception: print 'You must specify a directory of BAM files or a single bam file with --i';sys.exit()
try: refExonCoordinateFile = refExonCoordinateFile
        except Exception: print 'You must specify an AltAnalyze exon coordinate text file with --g';sys.exit()
try: outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile
except Exception: print 'You must specify an output path for the exon.bed reference file location with --r (e.g., --r /users/Hs_exon.bed)';sys.exit()
parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=useMultiProcessing,MLP=mlp)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/multiBAMtoBED.py
|
multiBAMtoBED.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This script can be run on its own to extract a single BAM file at a time or
indirectly by multiBAMtoBED.py to extract exon.bed files (Tophat format)
from many BAM files in a single directory at once. Requires an exon.bed reference
file for exon coordinates (genomic bins for which to sum unique read counts).
Excludes junction reads within each interval"""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
import copy,math
import time
import getopt
import traceback
def AppendOrWrite(export_path):
status = verifyFile(export_path)
if status == 'not found':
export_data = open(export_path,'w') ### Write this new file
else:
export_data = open(export_path,'a') ### Appends to existing file
return export_data
def verifyFile(filename):
status = 'not found'
try:
for line in open(filename,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
class IntronRetenionReads():
def __init__(self):
self.spanning=0
self.containing=0
def setCombinedPositions(self,combined_pos):
self.combined_pos = combined_pos
def setIntronSpanningRead(self,read_pos):
self.read_pos = read_pos
self.spanning=1
def setIntronMateRead(self):
self.containing=1
def IntronSpanningRead(self):
return self.read_pos
def CombinedPositions(self):
return self.combined_pos
    def For_and_Rev_Present(self):
if (self.containing+self.spanning)==2:
return True
else:
return False
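### Descriptive note: an intron-retention call below requires two kinds of evidence per
### read name - a read spanning an exon-intron boundary (setIntronSpanningRead) and a
### mate fully contained within the intron (setIntronMateRead); For_and_Rev_Present()
### reports when both have been observed.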
def parseExonReferences(bam_dir,reference_exon_bed,multi=False,intronRetentionOnly=False, MateSearch=False, species=None):
start_time = time.time()
bamfile = pysam.Samfile(bam_dir, "rb" )
reference_rows=0
output_bed_rows=0
exportCoordinates=True
try:
from import_scripts import BAMtoJunctionBED
retainedIntrons, chrm_found, gene_coord_db = BAMtoJunctionBED.retreiveAllKnownSpliceSites(returnExonRetention=True,DesignatedSpecies=species,path=bam_dir)
except Exception:
#print traceback.format_exc();sys.exit()
retainedIntrons={}
if intronRetentionOnly==False:
o = open (string.replace(bam_dir,'.bam','__exon.bed'),"w")
#io = AppendOrWrite(string.replace(bam_dir,'.bam','__junction.bed'))
io = open (string.replace(bam_dir,'.bam','__intronJunction.bed'),"w")
if exportCoordinates:
eo = open (reference_exon_bed[:-4]+'__minimumIntronIntervals.bed',"w")
intron_count=0
quick_look = 0
paired = False ### Indicates if paired-end reads exist in the file
for entry in bamfile.fetch():
example_chromosome = bamfile.getrname(entry.rname)
try: mate = bamfile.mate(entry); paired=True
except Exception: pass
quick_look+=1
if quick_look>20: ### Only examine the first 20 reads
break
### Import the gene data and remove introns that overlap with exons on the opposite or same strand
exonData_db={}
exon_sorted_list=[]
for line in open(reference_exon_bed,'rU').xreadlines(): ### read each line one-at-a-time rather than loading all in memory
line = line.rstrip('\n')
reference_rows+=1
#if reference_rows==6000: break
ref_entries = string.split(line,'\t'); #'12', '6998470', '6998522', 'ENSG00000111671:E1.1_ENSE00001754003', '0', '-'
chr,start,stop,exon,null,strand = ref_entries[:6]
start = int(start)
stop = int(stop)
if 'novel' not in exon:
exon_sorted_list.append([chr,start,stop,exon,strand])
exon_sorted_list.sort()
exon_sorted_filtered=[]
exon_index=0
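    ### Sorting exon_sorted_list lets checkOverlap scan a +/-10-entry positional window
    ### around each intron rather than comparing it against every region genome-wide.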
def checkOverlap(region, query_regions):
### Do not trust intron retention estimates if the intron is within an exon
chr,start,stop,exon,strand = region
gene = string.split(exon,':')[0]
overlap = False
for (chr,start2,stop2,exon2,strand2) in query_regions:
if ':E' in exon2 and gene not in exon2:
x = [start,stop,start2,stop2]
x.sort()
"""
if exon2 == 'ENSG00000155657:E363.1' and exon == 'ENSG00000237298:I10.1':
if x[1]==start and x[2]==stop: print 'kill'
print x, start, stop;sys.exit()
"""
if x[:2] == [start,stop] or x[-2:] == [start,stop]:
### intron NOT overlapping with an exon
#print region; print chr,start2,stop2,exon2,strand2;sys.exit()
pass
else: ### Partial or complete exon overlap with an intron in a second gene
if x[0]==start and x[-1]==stop and (start2-start)>100 and (stop-stop2)>100:
### Classic small intronic RNA example
pass
elif x[1]==start and x[2]==stop:
### The exon spans the entire intron
overlap = True
elif (stop2-start2)<50 and (stop-start)>400:
### Considered a minor insignificant overlap that can't account for paired-end mapping
pass
elif ((x[1]-x[0])<50 or (x[3]-x[2])<50 or (x[2]-x[1])<50) and (stop-start)>400:
### Considered a minor insignificant overlap that can't account for paired-end mapping
### Minor partial overlap of either side of the intron with an exon
if (stop2-start)>50 or (stop-start2)>50: ### Then it is more than a minor overlap
overlap = True
else:
overlap = True
"""
if exon == 'ENSG00000230724:I7.1' and exon2 == 'ENSG00000255229:E1.1':
print (x[1]-x[0]), (x[3]-x[2]), (x[2]-x[1]), (stop-start);sys.exit()
"""
return overlap
exonOverlappingIntrons=0
totalIntrons=0
for region in exon_sorted_list:
chr,start,stop,exon,strand = region
if ':I' in exon or exon in retainedIntrons:
try:
totalIntrons+=1
                query_regions = exon_sorted_list[max(0,exon_index-10):exon_index]+exon_sorted_list[exon_index+1:exon_index+10] ### clamp the lower bound so the first entries don't wrap to the list tail
overlap = checkOverlap(region,query_regions)
"""
if 'ENSG00000262880:I3.1' == exon:
for i in query_regions: print i
print chr,start,stop,exon,strand
print overlap;sys.exit()
"""
if overlap == True:
"""
if exonOverlappingIntrons>10000000:
for i in query_regions: print i
print chr,start,stop,exon,strand
print overlap;sys.exit()
"""
exon_index+=1
exonOverlappingIntrons+=1
continue
except Exception: pass
exon_index+=1
#try: exonData_db[gene].append([chr,start,stop,exon,strand])
#except Exception: exonData_db[gene]=[[chr,start,stop,exon,strand]]
exon_sorted_filtered.append(region)
#print exonOverlappingIntrons, 'introns overlapping with exons in distinct genes out of',totalIntrons;sys.exit()
for (chr,start,stop,exon,strand) in exon_sorted_filtered:
read_count=0;
five_intron_junction_count=0
three_intron_junction_count=0
intronJunction={}
exportIntervals=[]
#if exon != 'ENSG00000167107:I10.1': continue
if 'chr' in example_chromosome and 'chr' not in chr: ### Ensures the reference chromosome names match the query
chr = 'chr'+chr
try:
#if exon == 'ENSMUSG00000001472:E17.1':
#chr = '12'; start = '6998470'; stop = '6998522'
if intronRetentionOnly:
if ':E' in exon and exon not in retainedIntrons:
continue
if ':I' in exon or exon in retainedIntrons:
INTRON = True
else:
INTRON = False
start,stop=int(start),int(stop)
regionLen = abs(start-stop)
interval_read_count=0
if exportCoordinates:
if regionLen<700:
exportIntervals = [[start-50,stop+50]] ### Buffer intron into the exon
else:
exportIntervals = [[start-50,start+350],[stop-350,stop+50]]
#print exportIntervals, start, stop;sys.exit()
for interval in exportIntervals:
interval = map(str,interval)
eo.write(string.join([chr]+interval+[exon,'',strand],'\t')+'\n')
#chr = '*'
for alignedread in bamfile.fetch(chr, start,stop):
proceed = True
interval_read_count+=1
try: cigarstring = alignedread.cigarstring
except Exception:
codes = map(lambda x: x[0],alignedread.cigar)
if 3 in codes: cigarstring = 'N'
else: cigarstring = ''
try: read_strand = alignedread.opt('XS') ### TopHat/STAR knows which sequences are likely real splice sites so it assigns a real strand to the read
except Exception,e:
#if multi == False: print 'No TopHat strand information';sys.exit()
read_strand = None ### TopHat doesn't predict strand for many reads
                if read_strand==None or read_strand==strand: ### Tries to ensure the proper strand reads are considered (if strand read info is available)
if cigarstring == None: pass
else:
### Exclude junction reads ("N")
if 'N' in cigarstring:
if intronRetentionOnly==False:
X=int(alignedread.pos)
Y=int(alignedread.pos+alignedread.alen)
proceed = False
a = [X,Y]; a.sort()
b = [X,Y,start,stop]; b.sort()
if a[0]==b[1] or a[1]==b[2]: ### Hence, the read starts or ends in that interval
proceed = True
if proceed == False:
### Also search for cases were part of the read is contained within the exon
from import_scripts import BAMtoJunctionBED
coordinates,up_to_intron_dist = BAMtoJunctionBED.getSpliceSites(alignedread.cigar,X)
for (five_prime_ss,three_prime_ss) in coordinates:
five_prime_ss,three_prime_ss=int(five_prime_ss),int(three_prime_ss)
if five_prime_ss==start or three_prime_ss==start or five_prime_ss==stop or three_prime_ss==stop:
proceed = True
#print five_prime_ss, three_prime_ss, start, stop;sys.exit()
else:
### Below code is for more accurate estimation of intron retention
#"""
try:
if INTRON:
X=int(alignedread.pos)
Y=int(alignedread.pos+alignedread.alen)
read_pos = [X,Y]; read_pos.sort()
combined_pos = [X,Y,start,stop]; combined_pos.sort()
try: readname = alignedread.qname
except: readname = alignedread.query_name
if MateSearch==False:
if read_pos[0]==combined_pos[0] or read_pos[1]==combined_pos[-1]:
### Hence, the read starts or ends OUTSIDE of that interval (Store the overlap read coordinates)
if readname in intronJunction: ### occurs when the other mate has been stored to this dictionary
ir = intronJunction[readname]
ir.setIntronSpanningRead(read_pos)
ir.setCombinedPositions(combined_pos)
else:
ir = IntronRetenionReads() ### first time the read-name added to this dictionary
ir.setIntronSpanningRead(read_pos)
ir.setCombinedPositions(combined_pos)
intronJunction[readname] = ir
if paired == False:
ir.setIntronMateRead() ### For single-end FASTQ (not accurate)
else:
intron_boundaries = [start,stop]; intron_boundaries.sort()
if intron_boundaries[0]==combined_pos[0] and intron_boundaries[-1]==combined_pos[-1]: ### Hence, the read occurs entirely within the intron
### Store the "MATE" information (intron contained read)
if readname in intronJunction:
ir = intronJunction[readname]
ir.setIntronMateRead()
found = ir.For_and_Rev_Present()
else:
ir = IntronRetenionReads()
ir.setIntronMateRead()
intronJunction[readname] = ir
if readname in intronJunction:
ir = intronJunction[readname]
found = ir.For_and_Rev_Present()
                                            if found: ### both an intron-spanning read and an intron-contained mate were seen (can cause issues if reads bleed over on both sides of the intron)
combined_pos = ir.CombinedPositions()
read_pos = ir.IntronSpanningRead()
if read_pos[0]==combined_pos[0]:
five_intron_junction_count+=1 ### intron junction read that spans the 5' intron-exon
#print readname, exon, start, stop, X, Y, strand, read_pos, combined_pos
elif read_pos[1]==combined_pos[-1]:
three_intron_junction_count+=1 ### intron junction read that spans the 3' intron-exon
elif regionLen<500:
combined_pos = ir.CombinedPositions()
read_pos = ir.IntronSpanningRead()
intron_read_overlap = combined_pos[2]-combined_pos[1]
#print intron_read_overlap
if intron_read_overlap>25:
if read_pos[0]==combined_pos[0]:
five_intron_junction_count+=1 ### intron junction read that spans the 5' intron-exon
#print readname, exon, start, stop, X, Y, strand, read_pos, combined_pos
elif read_pos[1]==combined_pos[-1]:
#print read_pos, combined_pos, intron_read_overlap
three_intron_junction_count+=1 ### intron junction read that spans the 3' intron-exon
else:
                                            mate = bamfile.mate(alignedread) ### look up the paired-end mate for this read
try: cigarstring = mate.cigarstring
except Exception:
codes = map(lambda x: x[0],mate.cigar)
if 3 in codes: cigarstring = 'N'
else: cigarstring = ''
if 'N' not in cigarstring:
RX=int(mate.pos)
RY=int(mate.pos+mate.alen)
intron_boundaries = [start,stop]; intron_boundaries.sort()
combined_pos2 = [RX,RY,start,stop]; combined_pos2.sort()
if intron_boundaries[0]==combined_pos2[0] and intron_boundaries[-1]==combined_pos2[-1]:
if read_pos[0]==intron_boundaries[0]:
five_intron_junction_count+=1 ### intron junction read that spans the 5' intron-exon
#print readname,exon, start, stop, X, Y, RX, RY, strand, read_strand;sys.exit()
elif read_pos[1]==intron_boundaries[-1]:
three_intron_junction_count+=1 ### intron junction read that spans the 3' intron-exon
except Exception,e: ### Usually an unmapped read
#print traceback.format_exc();sys.exit()
pass
#"""
if proceed: read_count+=1
if intronRetentionOnly==False:
entries = [chr,str(start),str(stop),exon,null,strand,str(read_count),'0',str(int(stop)-int(start)),'0']
o.write(string.join(entries,'\t')+'\n')
output_bed_rows+=1
#"""
if INTRON and five_intron_junction_count>4 and three_intron_junction_count>4:
interval_read_count = interval_read_count/2 ### if paired-end reads
#print interval_read_count, five_intron_junction_count, three_intron_junction_count
#print abs((math.log(five_intron_junction_count,2)-math.log(three_intron_junction_count,2)))
                if abs((math.log(five_intron_junction_count,2)-math.log(three_intron_junction_count,2)))<2: ### require less than a 4-fold difference between the 5' and 3' junction counts
                    increment = -1 ### identical offset for both strands (both branches of the original strand test assigned -1)
total_count = five_intron_junction_count+three_intron_junction_count
outlier_start = start-10+increment; outlier_end = start+10+increment
junction_id = exon+'-'+str(start)
exon_lengths = '10,10'; dist = '0,0'
entries = [chr,str(outlier_start),str(outlier_end),junction_id,str(five_intron_junction_count),strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
io.write(string.join(entries,'\t')+'\n')
### 3' junction
                    increment = 0 ### identical offset for both strands
outlier_start = stop-10+increment; outlier_end = stop+10+increment
junction_id = exon+'-'+str(stop)
exon_lengths = '10,10'; dist = '0,0'
entries = [chr,str(outlier_start),str(outlier_end),junction_id,str(three_intron_junction_count),strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
io.write(string.join(entries,'\t')+'\n')
intron_count+=1
output_bed_rows+=1
#if output_bed_rows==1000: break
#"""
except Exception,e:
#print e;sys.exit()
### Occurs also due to non-chromosome contigs in the annotation file
            if 'bamfile without index' in str(e): ### substring test requires the string form of the exception
print 'Please ensure an index exists for the bam file:',bam_dir;sys.exit()
try: o.close()
except Exception: pass
try: eo.close()
except Exception: pass
try: io.close()
except Exception: pass
bamfile.close()
if multi==False:
print time.time()-start_time, 'seconds to assign reads for %d entries from %d reference entries' % (output_bed_rows,reference_rows)
if __name__ == "__main__":
#bam_dir = "H9.102.2.6.bam"
#reference_dir = 'H9.102.2.6__exon.bed'
    ################ Command-line arguments ################
    species = None
    if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Please designate a BAM file as input in the command-line"
print "Example: python BAMtoExonBED.py --i /Users/me/sample1.bam --r /Users/me/Hs_exon-cancer_hg19.bed"
sys.exit()
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','r=','s=','species='])
for opt, arg in options:
if opt == '--i': bam_dir=arg ### A single BAM file location (full path)
elif opt == '--s': species = arg
elif opt == '--species': species = arg
elif opt == '--r': reference_dir=arg ### An exon.bed reference file (created by AltAnalyze from junctions, multiBAMtoBED.py or other)
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
parseExonReferences(bam_dir,reference_dir,intronRetentionOnly=True,species=species)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/BAMtoExonBED.py
|
BAMtoExonBED.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import itertools
dirfile = unique
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def returnDirectories(sub_dir):
dir_list = unique.returnDirectories(sub_dir)
return dir_list
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if '.' in data:
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term not in data_dir and '.' in data: matches.append(data_dir)
return matches
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineAllLists(files_to_merge,original_filename,includeColumns=False):
headers =[]; all_keys={}; dataset_data={}; files=[]; unique_filenames=[]
count=0
for filename in files_to_merge:
duplicates=[]
count+=1
fn=filepath(filename); x=0; combined_data ={}
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
### If two files with the same name being merged
if file in unique_filenames:
file += str(count)
unique_filenames.append(file)
print file
files.append(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
if includeColumns==False:
for i in t:
headers.append(i+'.'+file)
#headers.append(i)
else:
headers.append(t[includeColumns]+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
if includeColumns==False:
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t;kill
else:
values = [t[includeColumns]]
#key = string.replace(key,' ','')
if len(key)>0 and key != ' ' and key not in combined_data: ### When the same key is present in the same dataset more than once
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
if permform_all_pairwise == 'yes':
try: combined_data[key].append(values); duplicates.append(key)
except Exception: combined_data[key] = [values]
else:
combined_data[key] = values
#print duplicates
dataset_data[filename] = combined_data
for i in dataset_data:
print len(dataset_data[i]), i
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key][0]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError:
values = null_values
if permform_all_pairwise == 'yes':
values = [null_values]
if permform_all_pairwise == 'yes':
try:
val_list = combined_file_data[key]
val_list.append(values)
combined_file_data[key] = val_list
except KeyError: combined_file_data[key] = [values]
else:
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
if permform_all_pairwise == 'yes':
results = getAllListCombinations(combined_file_data[key])
for result in results:
merged=[]
for i in result: merged+=i
values = string.join([key]+merged,'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
else:
try:
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
except Exception: print combined_file_data[key];sys.exit()
data.close()
print "exported",len(dataset_data),"to",export_file
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def getAllListCombinationsLong(a):
    ### example input illustrating the expected structure:
    #ls1 = ['a1','a2','a3']; ls2 = ['b1','b2','b3']; ls3 = ['c1','c2','c3']; ls = ls1,ls2,ls3
list_len_db={}
    for x in a: ### the loop variable must match the len() argument below
        list_len_db[len(x)]=[]
    #print len(list_len_db), list_len_db;sys.exit() ### leftover debugging
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r+=i
else:
#http://code.activestate.com/recipes/496807-list-of-all-combination-from-multiple-lists/
r=[[]]
for x in a:
t = []
for y in x:
for i in r:
t.append(i+[y])
r = t
return r
def combineUniqueAllLists(files_to_merge,original_filename):
headers =[]; all_keys={}; dataset_data={}; files=[]
for filename in files_to_merge:
print filename
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
for i in t:
headers.append(i+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t;kill
#key = string.replace(key,' ','')
combined_data[key] = values
if len(key)>0 and key != ' ':
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
dataset_data[filename] = combined_data
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError: values = null_values
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
data.close()
print "exported",len(dataset_data),"to",export_file
def getAllListCombinations(a):
#http://www.saltycrane.com/blog/2011/11/find-all-combinations-set-lists-itertoolsproduct/
""" Nice code to get all combinations of lists like in the above example, where each element from each list is represented only once """
list_len_db={}
for x in a:
list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r.append(i[0])
return [r]
else:
return list(itertools.product(*a))
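### Example (a sketch): getAllListCombinations([[1,2],[3]]) returns [(1, 3), (2, 3)]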
def joinFiles(files_to_merge,CombineType,unique_join,outputDir):
""" Join multiple files into a single output file """
global combine_type
global permform_all_pairwise
global output_dir
output_dir = outputDir
combine_type = string.lower(CombineType)
permform_all_pairwise = 'yes'
print 'combine type:',combine_type
print 'join type:', unique_join
#g = GrabFiles(); g.setdirectory(import_dir)
#files_to_merge = g.searchdirectory('xyz') ###made this a term to excluded
if unique_join:
combineUniqueAllLists(files_to_merge,'')
else:
combineAllLists(files_to_merge,'')
return output_dir+'/MergedFiles.txt'
if __name__ == '__main__':
dirfile = unique
    includeColumns = False ### set to a column index (e.g., -2) to merge a single column from each file
output_dir = filepath('output')
combine_type = 'union'
permform_all_pairwise = 'yes'
print "Analysis Mode:"
print "1) Batch Analysis"
print "2) Single Output"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": batch_mode = 'yes'
elif inp == "2": batch_mode = 'no'
print "Combine Lists Using:"
print "1) Grab Union"
print "2) Grab Intersection"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": combine_type = 'union'
elif inp == "2": combine_type = 'intersection'
if batch_mode == 'yes': import_dir = '/batch/general_input'
else: import_dir = '/input'
g = GrabFiles(); g.setdirectory(import_dir)
    files_to_merge = g.searchdirectory('xyz') ### 'xyz' acts as an exclusion term
if batch_mode == 'yes':
second_import_dir = '/batch/primary_input'
g = GrabFiles(); g.setdirectory(second_import_dir)
        files_to_merge2 = g.searchdirectory('xyz') ### 'xyz' acts as an exclusion term
for file in files_to_merge2:
temp_files_to_merge = customLSDeepCopy(files_to_merge)
original_filename = string.split(file,'/'); original_filename = original_filename[-1]
temp_files_to_merge.append(file)
if '.' in file:
combineAllLists(temp_files_to_merge,original_filename)
else:
combineAllLists(files_to_merge,'',includeColumns=includeColumns)
print "Finished combining lists. Select return/enter to exit"; inp = sys.stdin.readline()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/mergeFilesUnique.py
|
mergeFilesUnique.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import unique
import traceback
""" Intersecting Coordinate Files """
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
class EventInformation:
def __init__(self, id, event_direction, clusterID, altExons, coordinates):
self.id = id
self.event_direction = event_direction
self.clusterID = clusterID
self.altExons = altExons
self.coordinates = coordinates
def ID(self):
return self.id
def GeneID(self):
return string.split(self.id,':')[1]
def Symbol(self):
return string.split(self.id,':')[0]
def EventDirection(self):
return self.event_direction
def ClusterID(self):
return self.clusterID
def AltExons(self):
""" Can be multiple exons """
altExons = string.split(self.altExons,'|')
return altExons
def JunctionCoordinateInterval(self):
""" If the exon block is not in the database, return the full interval """
try:
j1,j2 = string.split(self.Coordinates(),'|')
chr,coord1 = string.split(j1,':')
chr,coord2 = string.split(j2,':')
coords = map(int,string.split(coord1,'-'))
coords += map(int,string.split(coord2,'-'))
coords.sort()
return chr,coords[0],coords[-1]
except:
""" See the class circInformation """
return self.coordinates
def AltExonBlockCoord(self):
exon_block_coords=[]
for altExon in self.AltExons():
if 'ENSG' not in altExon:
altExon = self.GeneID() + ':' + altExon
altExon_block = string.split(altExon,'.')[0]
try:
chr, strand, start, end = exon_block_coordinates[altExon_block]
except:
chr, start, end = self.JunctionCoordinateInterval()
exon_block_coords.append(chr+':'+str(start)+'-'+str(end))
return string.join(exon_block_coords,'|')
def FlankingBlockCoord(self):
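        ### Collect the alternative block's coordinates plus those of its immediate upstream
        ### and downstream neighbors (flanking introns for an exon block, flanking exons for
        ### an intron block; extendFlanking widens exon flanks to the neighboring exons),
        ### then return the min/max of the combined interval.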
coords=[]
for altExon in self.AltExons():
if 'ENSG' in altExon:
altExon = string.split(altExon,':')[1]
altExon_block = string.split(altExon,'.')[0]
block_num = int(altExon_block[1:])
block_type = altExon_block[0]
try:
chr, strand, start, end = exon_block_coordinates[self.GeneID() + ':' + block_type+str(block_num)]
coords+=[start, end]
except:
pass
if block_type == 'E':
if extendFlanking == False:
upstream_intron = self.GeneID() + ':' + "I"+str(block_num-1)
else:
upstream_intron = self.GeneID() + ':' + "E"+str(block_num-1)
if upstream_intron not in exon_block_coordinates:
upstream_intron = self.GeneID() + ':' + "I"+str(block_num-1)
try:
chr, strand, Ustart, Uend = exon_block_coordinates[upstream_intron]
except:
chr, Ustart, Uend = self.JunctionCoordinateInterval()
if extendFlanking == False:
downstream_intron = self.GeneID() + ':' + "I"+str(block_num)
else:
downstream_intron = self.GeneID() + ':' + "E"+str(block_num+1)
if downstream_intron not in exon_block_coordinates:
downstream_intron = self.GeneID() + ':' + "I"+str(block_num)
try:
chr, strand, Dstart, Dend = exon_block_coordinates[downstream_intron]
except:
chr, Dstart, Dend = self.JunctionCoordinateInterval()
else:
upstream_exon = self.GeneID() + ':' + "E"+str(block_num)
try:
chr, strand, Ustart, Uend = exon_block_coordinates[upstream_exon]
except:
chr, Ustart, Uend = self.JunctionCoordinateInterval()
downstream_exon = self.GeneID() + ':' + "E"+str(block_num+1)
try:
chr, strand, Dstart, Dend = exon_block_coordinates[downstream_exon]
except:
chr, Dstart, Dend = self.JunctionCoordinateInterval()
coords += [Ustart, Uend, Dstart, Dend]
coords.sort()
start = coords[0]
end = coords[-1]
return start, end
def Coordinates(self):
return self.coordinates
def Export(self):
annotations = [self.Symbol(), self.ID(), self.EventDirection(), self.ClusterID(), self.Coordinates(), self.altExons, self.AltExonBlockCoord()]
return annotations
def __repr__(self):
return '%s %s %s %s' % (self.ID(), self.EventDirection(), self.ClusterID(), self.Coordinates()) ### __repr__ must return a string, not a tuple
class circInformation(EventInformation):
def __init__(self, id, pval, logFC):
self.id = id
self.pval = pval
self.GeneID()
self.Coordinates()
self.FindAssociatedExonsBlocks()
self.logFC = logFC
self.AltExons()
def pVal(self):
return self.pval
def EventDirection(self):
if self.logFC > 0:
return 'inclusion'
else:
return 'exclusion'
def Symbol(self):
symbol = string.split(self.ID(),'-')[0]
self.symbol = symbol
return self.symbol
def GeneID(self):
if self.Symbol() in symbol_to_gene:
geneID=symbol_to_gene[self.Symbol()][0]
for gene in symbol_to_gene[self.Symbol()]:
if 'ENS' in gene:
geneID = gene
else:
geneID = self.Symbol()
return geneID
def Coordinates(self):
c1,c2 = string.split(self.ID(),'-')[-2:]
self.chr,c1 = string.split(c1,':')
self.start = int(c1)
self.end = int(c2)
self.coordinates = self.chr, self.start, self.end
return self.chr+':'+str(self.start)+'-'+str(self.end)
def FindAssociatedExonsBlocks(self):
""" Indentify matching exon/intron regions for the circRNA coordinates """
chr, cstart, cend = self.coordinates
circRNA_coords = [cstart, cend]
circRNA_coords.sort()
if self.GeneID() in gene_to_exons:
search_blocks = gene_to_exons[self.GeneID()]
aligned_blocks=[]
for exon_block in search_blocks:
chr, strand, start, end = exon_block_coordinates[exon_block]
block_coords = [start, end]
block_coords.sort()
coords = circRNA_coords+block_coords
coords.sort()
if len(unique.unique(coords))==3:
if exon_block not in aligned_blocks:
aligned_blocks.append(exon_block)
elif coords[:2]==circRNA_coords or coords[-2:]==circRNA_coords:
pass
else:
if exon_block not in aligned_blocks:
aligned_blocks.append(exon_block)
self.aligned_blocks = aligned_blocks
else:
self.aligned_blocks=[]
return self.aligned_blocks
def ClusterID(self):
return self.GeneID()
def AltExons(self):
self.altExons = string.join(self.aligned_blocks,'|')
return self.aligned_blocks
def Chr(self):
return self.chr
def Start(self):
return self.start
def End(self):
return self.end
def Strand(self):
return ''
def Annotation(self):
return ''
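### Hedged illustration (not part of the original script): the interval test
### used by FindAssociatedExonsBlocks, restated standalone. Two sorted
### intervals count as overlapping unless one of them occupies the first two
### or the last two slots of the merged, sorted coordinate list; a shared
### boundary (three unique coordinates) also counts as an overlap.
def _demo_interval_overlap(circRNA_coords, block_coords):
    circRNA_coords = sorted(circRNA_coords)
    block_coords = sorted(block_coords)
    coords = circRNA_coords + block_coords
    coords.sort()
    if len(unique.unique(coords)) == 3:
        return True ### the intervals share one boundary coordinate
    return not (coords[:2] == circRNA_coords or coords[-2:] == circRNA_coords)
### e.g. _demo_interval_overlap([100,200],[150,250]) -> True
### _demo_interval_overlap([100,200],[300,400]) -> False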
class PeakInformation:
def __init__(self, chr, start, end, strand, annotation, gene, symbol):
self.chr = chr
self.start = start
self.end = end
self.strand = strand
self.annotation = annotation
self.symbol = symbol
self.gene = gene
def Chr(self):
return self.chr
def Start(self):
return self.start
def End(self):
return self.end
def Strand(self):
return self.strand
def GeneID(self):
return self.gene
def Annotation(self):
return self.annotation
def Symbol(self):
return self.symbol
def Coordinates(self):
return self.chr+':'+str(self.Start())+'-'+str(self.End())
def Export(self):
annotations = [self.Coordinates(),self.Annotation()]
return annotations
def __repr__(self):
return '%s %s' % (self.Symbol(), self.Annotation()) ### __repr__ must return a string, not a tuple
def importSplicingEvents(folder):
dataset_events={}
files = unique.read_directory(folder)
for file in files:
if 'PSI.' in file and '.txt' in file:
events=[]
dataset = file[:-4]
fn = unique.filepath(folder+'/'+file)
firstRow=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
index=0
""" Standard Fields from MultiPath-PSI """
for i in t:
if 'Event-Direction'==i:
ed = index
if 'ClusterID' == i:
ci = index
if 'AltExons' == i:
ae = index
if 'EventAnnotation' == i:
ea = index
if 'Coordinates' == i:
co = index
index+=1
firstRow = False
else:
id = t[0]
event_direction = t[ed]
clusterID = t[ci]
altExons = t[ae]
coordinates = t[co]
ei = EventInformation(id, event_direction, clusterID, altExons, coordinates)
events.append(ei)
dataset_events[dataset]=events
return dataset_events
def eCLIPimport(folder):
eCLIP_dataset_peaks={}
files = unique.read_directory(folder)
for file in files:
if '.bed' in file:
peaks=[]
dataset = file[:-4]
fn = unique.filepath(folder+'/'+file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
chr = t[0]
start = int(t[1])
end = int(t[2])
strand = t[5]
annotation = t[6]
gene = string.split(t[8],'.')[0]
symbol = t[-2]
pi = PeakInformation(chr, start, end, strand, annotation, gene, symbol)
peaks.append(pi)
eCLIP_dataset_peaks[dataset]=peaks
return eCLIP_dataset_peaks
def importExonCoordinates(species):
""" Import exon block, intron block and gene coordinates """
firstRow=True
exon_coordinate_path = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
fn = unique.filepath(exon_coordinate_path)
gene_coordinates={}
gene_to_exons={}
exon_block_coordinates={}
gene_chr_strand = {}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
else:
gene, exonid, chr, strand, exon_region_starts, exon_region_ends, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t
exon_region_starts = map(int,string.split(exon_region_starts,'|'))
exon_region_ends = map(int,string.split(exon_region_ends,'|'))
exon_block = gene+':'+string.split(exonid,'.')[0]
gene_chr_strand[gene]=chr,strand
if gene in gene_to_exons:
gene_to_exons[gene].append(exon_block)
else:
gene_to_exons[gene] = [exon_block]
if gene in gene_coordinates:
gene_coordinates[gene]+=exon_region_starts+exon_region_ends
else:
gene_coordinates[gene]=exon_region_starts+exon_region_ends
if exon_block in exon_block_coordinates:
exon_block_coordinates[exon_block]+=exon_region_starts+exon_region_ends
else:
exon_block_coordinates[exon_block]=exon_region_starts+exon_region_ends
for gene in gene_coordinates:
gene_coordinates[gene].sort()
start = gene_coordinates[gene][0]
end = gene_coordinates[gene][-1]
chr,strand = gene_chr_strand[gene]
gene_coordinates[gene]= chr, strand, start, end
for exon in exon_block_coordinates:
exon_block_coordinates[exon].sort()
start = exon_block_coordinates[exon][0]
end = exon_block_coordinates[exon][-1]
chr,strand = gene_chr_strand[string.split(exon,':')[0]]
exon_block_coordinates[exon] = chr, strand, start, end
print len(gene_coordinates), 'genes'
print len(exon_block_coordinates), 'exons/introns'
return gene_coordinates, exon_block_coordinates, gene_to_exons
def importGeneSymbols(species):
import gene_associations
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
from import_scripts import OBO_import
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
return gene_to_symbol, symbol_to_gene
def importCircularRNAEvents(folder,circ_p):
dataset_events={}
files = unique.read_directory(folder)
for file in files:
if 'circRNA.' in file and '.txt' in file:
events=[]
dataset = file[:-4]
fn = unique.filepath(folder+'/'+file)
firstRow=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
index=0
""" Standard Fields from MultiPath-PSI """
for i in t:
if 'PValue'==i:
pv = index
if 'logFC'==i:
lf = index
index+=1
firstRow = False
else:
id = t[0]
pval = float(t[pv])
logFC = float(t[lf])
ci = circInformation(id, pval, logFC)
if pval < circ_p:
events.append(ci)
dataset_events[dataset]=events
return dataset_events
def alignEventsAndPeaks(eCLIP, AS, eCLIP_peaks,AS_events,AS_event_dir):
""" Compare genomic coordinates from these two datasets """
eCLIP_gene_peaks = {}
eCLIP_symbol_peaks = {}
AS_gene_events = {}
gene_to_symbol = {}
""" Create gene indexes for both datasets, allowing alternative matches by symbol """
for pi in eCLIP_peaks:
if pi.GeneID() not in eCLIP_gene_peaks:
eCLIP_gene_peaks[pi.GeneID()] = [pi]
else:
eCLIP_gene_peaks[pi.GeneID()].append(pi)
if pi.Symbol() not in eCLIP_symbol_peaks:
eCLIP_symbol_peaks[pi.Symbol()] = [pi]
else:
eCLIP_symbol_peaks[pi.Symbol()].append(pi)
for ei in AS_events:
if ei.GeneID() not in AS_gene_events:
AS_gene_events[ei.GeneID()] = [ei]
else:
AS_gene_events[ei.GeneID()].append(ei)
gene_to_symbol[ei.GeneID()] = ei.Symbol()
""" Match peaks to splicing events based on annotated genes (could do by coordinates) """
gene_export = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_gene.txt')
gene_export_incl = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_gene-incl.txt')
gene_export_excl = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_gene-excl.txt')
exon_export = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_exon.txt')
exon_export_incl = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_exon-incl.txt')
exon_export_excl = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_exon-excl.txt')
flanking_export = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_flanking.txt')
flanking_export_incl = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_flanking-incl.txt')
flanking_export_excl = export.ExportFile(AS_event_dir+'/eCLIP-overlaps/'+eCLIP+'_'+AS+'_flanking-excl.txt')
header = ['Symbol', 'UID', 'EventDirection', 'ClusterID', 'Coordinates', 'altExons', 'AltExonBlockCoord','Peak-Coordinates','Peak-Annotations']
header = string.join(header,'\t')+'\n'
gene_export.write(header)
gene_export_incl.write(header)
gene_export_excl.write(header)
exon_export.write(header)
exon_export_incl.write(header)
exon_export_excl.write(header)
flanking_export.write(header)
flanking_export_incl.write(header)
flanking_export_excl.write(header)
exported=[]
for geneID in AS_gene_events:
symbol = gene_to_symbol[geneID]
pi_set = None
if geneID in eCLIP_gene_peaks:
pi_set = eCLIP_gene_peaks[geneID]
elif symbol in eCLIP_symbol_peaks:
pi_set = eCLIP_symbol_peaks[symbol]
if pi_set != None:
""" Matching peak and AS at the gene-level """
for ei in AS_gene_events[geneID]:
event_annotations = ei.Export()
for altExon in ei.AltExons():
if 'ENSG' not in altExon:
altExon = ei.GeneID() + ':' + altExon
altExon_block = string.split(altExon,'.')[0]
try:
chr, strand, start, end = exon_block_coordinates[altExon_block]
except:
""" Can occur if the exon region is a novel 5' exon (not in database)
use the junction interval instead (less precise) """
chr, start, end = ei.JunctionCoordinateInterval()
for pi in pi_set:
peak_annotations = pi.Export()
overlaps = string.join(event_annotations+peak_annotations,'\t')+'\n'
if overlaps in exported:
pass ### This comparison has already been exported
else:
exported.append(overlaps)
gene_export.write(overlaps)
if ei.EventDirection()=='inclusion':
gene_export_incl.write(overlaps)
else:
gene_export_excl.write(overlaps)
""" Find direct exon overlaps """
AS_coords = [start,end]
AS_coords.sort()
Peak_coords = [pi.Start(),pi.End()]
Peak_coords.sort()
coords = AS_coords+Peak_coords
coords.sort()
if coords[:2]==AS_coords or coords[-2:]==AS_coords:
pass
else:
exon_export.write(overlaps)
if ei.EventDirection()=='inclusion':
exon_export_incl.write(overlaps)
else:
exon_export_excl.write(overlaps)
""" Find indirect flanking intron overlaps """
flank_start,flank_end = ei.FlankingBlockCoord()
AS_coords = [flank_start,flank_end]
AS_coords.sort()
Peak_coords = [pi.Start(),pi.End()]
Peak_coords.sort()
coords = AS_coords+Peak_coords
coords.sort()
if coords[:2]==AS_coords or coords[-2:]==AS_coords:
pass
else:
flanking_export.write(overlaps)
if ei.EventDirection()=='inclusion':
flanking_export_incl.write(overlaps)
else:
flanking_export_excl.write(overlaps)
if __name__ == '__main__':
################ Command-line arguments ################
import getopt
splicing_events_dir = None
CLIP_dir = None
circRNA_dir = None
species = 'Hs'
extendFlanking = False
circ_p = 0.05
""" Usage:
# For finding overlaps between annotated eCLIP and MultiPath-PSI splicing events from AltAnalyze
python import_scripts/peakOverlaps.py --species Hs --clip /Users/abcd/dataAnalysis/eCLIP --events /Users/abcd/dataAnalysis/AS-Events/ --extendFlanking True
# For finding overlaps between annotated eCLIP and circRNA outputs from edgeR
python import_scripts/peakOverlaps.py --species Hs --clip /Users/abcd/dataAnalysis/eCLIP --events /Users/abcd/dataAnalysis/circRNA/ --extendFlanking True --circ_p 0.1
# For finding overlaps between annotated MultiPath-PSI splicing events and circRNA outputs from edgeR
python import_scripts/peakOverlaps.py --species Hs --circ /Users/abcd/dataAnalysis/circRNA/ --circ_p 0.1 --events /Users/abcd/dataAnalysis/AS-Events/ --extendFlanking True
"""
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print 'WARNING!!!! Insufficient command-line arguments supplied.'
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['species=','clip=','events=', 'extendFlanking=', 'circ=', 'circ_p='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--species':
species = arg
elif opt == '--clip':
CLIP_dir = arg
elif opt == '--circ':
circRNA_dir = arg
elif opt == '--circ_p':
circ_p = float(arg)
elif opt == '--events':
splicing_events_dir = arg
elif opt == '--extendFlanking':
if 'true' in string.lower(arg) or 'yes' in string.lower(arg):
extendFlanking = True
print 'Extend Flanking = TRUE'
gene_to_symbol, symbol_to_gene = importGeneSymbols(species)
gene_coordinates, exon_block_coordinates, gene_to_exons = importExonCoordinates(species)
if CLIP_dir != None:
dataset_peaks = eCLIPimport(CLIP_dir)
if splicing_events_dir != None:
AS_dataset_events = importSplicingEvents(splicing_events_dir)
dataset_events = AS_dataset_events
events_dir = splicing_events_dir
if circRNA_dir != None:
circRNA_dataset_events = importCircularRNAEvents(circRNA_dir, circ_p)
dataset_events = circRNA_dataset_events
events_dir = circRNA_dir
if CLIP_dir == None:
""" Comparison of circRNA coordinates as peaks to AS events """
dataset_events = AS_dataset_events
dataset_peaks = circRNA_dataset_events
print len(dataset_peaks), 'peak datasets'
print len(dataset_events), 'Alternative-event datasets\n'
for eCLIP in dataset_peaks:
for AE in dataset_events:
print 'Aligning coordinates from', eCLIP, 'to', AE
eCLIP_peaks = dataset_peaks[eCLIP]
events = dataset_events[AE]
alignEventsAndPeaks(eCLIP, AE, eCLIP_peaks, events,events_dir)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/peakOverlaps.py
|
peakOverlaps.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This script can be run on its own to extract a single BAM file at a time or
indirectly by multiBAMtoBED.py to extract junction.bed files (Tophat format)
from many BAM files in a single directory at once. Currently uses the Tophat
predicted Strand notation opt('XS') for each read. This can be substituted with
strand notations from other aligners (check with the software authors)."""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
#import bamnostic as pysam
import copy,getopt
import time
import traceback
try: import export
except Exception: pass
try: import unique
except Exception: pass
try:
import TabProxies
import ctabix
import csamtools
import cvcf
except Exception:
try:
#if os.name != 'posix': print traceback.format_exc()
pass
except Exception: pass
try:
from pysam import libctabixproxies
except:
#print traceback.format_exc()
pass
def getSpliceSites(cigarList,X):
cummulative=0
coordinates=[]
for (code,seqlen) in cigarList:
if code == 0:
cummulative+=seqlen
if code == 3:
#if strand == '-':
five_prime_ss = str(X+cummulative)
cummulative+=seqlen ### add the intron length
three_prime_ss = str(X+cummulative+1) ### 3' exon start (prior exon splice-site + intron length)
coordinates.append([five_prime_ss,three_prime_ss])
up_to_intron_dist = cummulative
return coordinates, up_to_intron_dist
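### Hedged illustration (not part of the original script): walking a 50M1000N50M
### alignment starting at position 1000. pysam encodes this cigar as
### [(0,50),(3,1000),(0,50)]; the single N (intron) block yields one junction
### whose 5' boundary is 1000+50 and whose 3' exon starts one base past the
### intron, while up_to_intron_dist reports the span up through the intron.
def _demo_getSpliceSites():
    coordinates, up_to_intron_dist = getSpliceSites([(0,50),(3,1000),(0,50)],1000)
    print coordinates, up_to_intron_dist ### expected: [['1050', '2051']] 1050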
def writeJunctionBedFile(junction_db,jid,o):
strandStatus = True
for (chr,jc,tophat_strand) in junction_db:
if tophat_strand==None:
strandStatus = False
break
#if strandStatus== False: ### If no strand information in the bam file filter and add known strand data
junction_db2={}
strand='+'
for (chr,jc,tophat_strand) in junction_db:
original_chr = chr
if 'chr' not in chr:
chr = 'chr'+chr
for j in jc:
if tophat_strand==None:
try:
strand = splicesite_db[chr,j]
junction_db2[(original_chr,jc,strand)]=junction_db[(original_chr,jc,tophat_strand)]
except Exception:
### Assume the strand is the last strand detected (in the same genomic region)
junction_db2[(original_chr,jc,strand)]=junction_db[(original_chr,jc,tophat_strand)]
else:
### Programs like HISAT2 have some reads with strand assigned and others without
junction_db2[(original_chr,jc,tophat_strand)]=junction_db[(original_chr,jc,tophat_strand)]
junction_db = junction_db2
for (chr,jc,tophat_strand) in junction_db:
x_ls=[]; y_ls=[]; dist_ls=[]
read_count = str(len(junction_db[(chr,jc,tophat_strand)]))
for (X,Y,dist) in junction_db[(chr,jc,tophat_strand)]:
x_ls.append(X); y_ls.append(Y); dist_ls.append(dist)
outlier_start = min(x_ls); outlier_end = max(y_ls); dist = str(max(dist_ls))
exon_lengths = str(int(jc[0])-outlier_start)+','+str(outlier_end-int(jc[1])+1)
junction_id = 'JUNC'+str(jid)+':'+jc[0]+'-'+jc[1] ### store the unique junction coordinates in the name
output_list = [chr,str(outlier_start),str(outlier_end),junction_id,read_count,tophat_strand,str(outlier_start),str(outlier_end),'255,0,0\t2',exon_lengths,'0,'+dist]
o.write(string.join(output_list,'\t')+'\n')
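### Hedged illustration (not part of the original script): a minimal junction_db
### entry, keyed by (chromosome, junction coordinates, strand) and holding one
### [read_start, read_end, up_to_intron_dist] observation, together with the
### TopHat-style BED line writeJunctionBedFile emits for it.
def _demo_writeJunctionBedFile():
    import StringIO
    o = StringIO.StringIO()
    junction_db = {('chr1',('1050','2051'),'+'): [[1000,2101,1050]]}
    writeJunctionBedFile(junction_db,1,o)
    print o.getvalue() ### chr1 1000 2101 JUNC1:1050-2051 1 + 1000 2101 255,0,0 2 50,51 0,1050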
def writeIsoformFile(isoform_junctions,o):
for coord in isoform_junctions:
isoform_junctions[coord] = unique.unique(isoform_junctions[coord])
if '+' in coord:
print coord, isoform_junctions[coord]
if '+' in coord:
sys.exit()
def verifyFileLength(filename):
count = 0
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>9: break
except Exception: null=[]
return count
def retreiveAllKnownSpliceSites(returnExonRetention=False,DesignatedSpecies=None,path=None):
### Uses a priori strand information when none present
import export, unique
chromosomes_found={}
try: parent_dir = export.findParentDir(bam_file)
except Exception: parent_dir = export.findParentDir(path)
species = None
for file in os.listdir(parent_dir):
if 'AltAnalyze_report' in file and '.log' in file:
log_file = unique.filepath(parent_dir+'/'+file)
log_contents = open(log_file, "rU")
species_tag = ' species: '
for line in log_contents:
line = line.rstrip()
if species_tag in line:
species = string.split(line,species_tag)[1]
if species == None:
try: species = IndicatedSpecies
except Exception: species = DesignatedSpecies
splicesite_db={}
gene_coord_db={}
length=0
try:
#if ExonReference==None:
exon_dir = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
length = verifyFileLength(exon_dir)
except Exception:
#print traceback.format_exc();sys.exit()
pass
if length==0:
exon_dir = ExonReference
refExonCoordinateFile = unique.filepath(exon_dir)
firstLine=True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
spliceEvent = t[-2]
#start = int(start); stop = int(stop)
#geneID = string.split(exon,':')[0]
try:
gene_coord_db[geneID,chr].append(int(start))
gene_coord_db[geneID,chr].append(int(stop))
except Exception:
gene_coord_db[geneID,chr] = [int(start)]
gene_coord_db[geneID,chr].append(int(stop))
if returnExonRetention:
if 'exclusion' in spliceEvent:
splicesite_db[geneID+':'+exon]=[]
else:
splicesite_db[chr,start]=strand
splicesite_db[chr,stop]=strand
if len(chr)<5 or ('GL0' not in chr and 'GL' not in chr and 'JH' not in chr and 'MG' not in chr):
chromosomes_found[string.replace(chr,'chr','')] = []
for i in gene_coord_db:
gene_coord_db[i].sort()
gene_coord_db[i] = [gene_coord_db[i][0],gene_coord_db[i][-1]]
return splicesite_db,chromosomes_found,gene_coord_db
def exportIndexes(input_dir):
import unique
bam_dirs = unique.read_directory(input_dir)
print 'Building BAM index files',
for file in bam_dirs:
if string.lower(file[-4:]) == '.bam':
bam_dir = input_dir+'/'+file
bamf = pysam.Samfile(bam_dir, "rb" )
### Is there an indexed .bai for the BAM? Check.
try:
for entry in bamf.fetch():
codes = map(lambda x: x[0],entry.cigar)
break
except Exception:
### Make BAM Index
print '.',
bam_dir = str(bam_dir)
#On Windows, this indexing step will fail if the __init__ pysam file line 51 is not set to - catch_stdout = False
pysam.index(bam_dir)
bamf = pysam.Samfile(bam_dir, "rb" )
def parseJunctionEntries(bam_dir,multi=False, Species=None, ReferenceDir=None):
global bam_file
global splicesite_db
global IndicatedSpecies
global ExonReference
IndicatedSpecies = Species
ExonReference = ReferenceDir
bam_file = bam_dir
try: splicesite_db,chromosomes_found, gene_coord_db = retreiveAllKnownSpliceSites()
except Exception:
print traceback.format_exc()
splicesite_db={}; chromosomes_found={}
start = time.time()
try: import collections; junction_db=collections.OrderedDict()
except Exception:
try: import ordereddict; junction_db = ordereddict.OrderedDict()
except Exception: junction_db={}
original_junction_db = copy.deepcopy(junction_db)
bamf = pysam.Samfile(bam_dir, "rb" )
### Is there an indexed .bai for the BAM? Check.
try:
for entry in bamf.fetch():
codes = map(lambda x: x[0],entry.cigar)
break
except Exception:
### Make BAM Index
if multi == False:
print 'Building BAM index file for', bam_dir
bam_dir = str(bam_dir)
#On Windows, this indexing step will fail if the __init__ pysam file line 51 is not set to - catch_stdout = False
pysam.index(bam_dir)
bamf = pysam.Samfile(bam_dir, "rb" )
chromosome = False
chromosomes={}
bam_reads=0
count=0
jid = 1
prior_jc_start=0
l1 = None; l2=None
o = open (string.replace(bam_dir,'.bam','__junction.bed'),"w")
o.write('track name=junctions description="TopHat junctions"\n')
export_isoform_models = False
if export_isoform_models:
io = open (string.replace(bam_dir,'.bam','__isoforms.txt'),"w")
isoform_junctions = copy.deepcopy(junction_db)
outlier_start = 0; outlier_end = 0; read_count = 0; c=0
for entry in bamf.fetch():
bam_reads+=1
try: cigarstring = entry.cigarstring
except Exception:
codes = map(lambda x: x[0],entry.cigar)
if 3 in codes: cigarstring = 'N'
else: cigarstring = None
if cigarstring != None:
if 'N' in cigarstring: ### Hence a junction
if prior_jc_start == 0: pass
elif (entry.pos-prior_jc_start) > 5000 or bamf.getrname( entry.rname ) != chromosome: ### New chr or far from prior reads
writeJunctionBedFile(junction_db,jid,o)
#writeIsoformFile(isoform_junctions,io)
junction_db = copy.deepcopy(original_junction_db) ### Re-set this object
jid+=1
chromosome = bamf.getrname( entry.rname )
chromosomes[chromosome]=[] ### keep track
X=entry.pos
#if entry.query_name == 'SRR791044.33673569':
#print chromosome, entry.pos, entry.reference_length, entry.alen, entry.query_name
Y=entry.pos+entry.alen
prior_jc_start = X
try: tophat_strand = entry.opt('XS') ### TopHat knows which sequences are likely real splice sites so it assigns a real strand to the read
except Exception:
#if multi == False: print 'No TopHat strand information';sys.exit()
tophat_strand = None
coordinates,up_to_intron_dist = getSpliceSites(entry.cigar,X)
#if count > 100: sys.exit()
#print entry.query_name,X, Y, entry.cigarstring, entry.cigar, tophat_strand
for (five_prime_ss,three_prime_ss) in coordinates:
jc = five_prime_ss,three_prime_ss
#print X, Y, jc, entry.cigarstring, entry.cigar
try: junction_db[chromosome,jc,tophat_strand].append([X,Y,up_to_intron_dist])
except Exception: junction_db[chromosome,jc,tophat_strand] = [[X,Y,up_to_intron_dist]]
if export_isoform_models:
try:
mate = bamf.mate(entry) #https://groups.google.com/forum/#!topic/pysam-user-group/9HM6nx_f2CI
if 'N' in mate.cigarstring:
mate_coordinates,mate_up_to_intron_dist = getSpliceSites(mate.cigar,mate.pos)
else: mate_coordinates=[]
except Exception: mate_coordinates=[]
#print coordinates,mate_coordinates
junctions = map(lambda x: tuple(x),coordinates)
if len(mate_coordinates)>0:
try:
isoform_junctions[chromosome,tuple(junctions),tophat_strand].append(mate_coordinates)
except Exception:
isoform_junctions[chromosome,tuple(junctions),tophat_strand] = [mate_coordinates]
else:
if (chromosome,tuple(junctions),tophat_strand) not in isoform_junctions:
isoform_junctions[chromosome,tuple(junctions),tophat_strand] = []
count+=1
writeJunctionBedFile(junction_db,jid,o) ### One last read-out
if multi == False:
print bam_reads, count, time.time()-start, 'seconds required to parse the BAM file'
o.close()
bamf.close()
missing_chromosomes=[]
for chr in chromosomes_found:
if chr not in chromosomes:
if 'chr'+chr not in chromosomes: ### the BAM may use 'chr'-prefixed names for the unprefixed database chromosomes
if chr != 'M' and chr != 'MT':
missing_chromosomes.append(chr)
#missing_chromosomes = ['A','B','C','D']
try: bam_file = export.findFilename(bam_file)
except Exception: pass
return bam_file, missing_chromosomes
if __name__ == "__main__":
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
print "Warning! Please designate a BAM file as input in the command-line"
print "Example: python BAMtoJunctionBED.py --i /Users/me/sample1.bam"
sys.exit()
else:
Species = None
reference_dir = None
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species=','r='])
for opt, arg in options:
if opt == '--i': bam_dir=arg ### full path of a BAM file
elif opt == '--species': Species=arg ### species for STAR analysis to get strand
elif opt == '--r': reference_dir=arg ### An exon.bed reference file (created by AltAnalyze from junctions, multiBAMtoBED.py or other) - required for STAR to get strand if XS field is empty
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
try: parseJunctionEntries(bam_dir,Species=Species,ReferenceDir=reference_dir)
except ZeroDivisionError:
print [sys.argv[1:]],'error'; sys.exit(1)
""" Benchmarking notes: On a 2017 MacBook Pro with 16GB of RAM and a local 7GB BAM file (solid drive), 9 minutes (526s) to complete writing a junction.bed.
To simply search through the file without looking at the CIGAR, the script takes close to 5 minutes (303s)"""
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/BAMtoJunctionBED.py
|
BAMtoJunctionBED.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import itertools
dirfile = unique
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def returnDirectories(sub_dir):
dir_list = unique.returnDirectories(sub_dir)
return dir_list
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is the instance; self.data holds the directory assigned via setdirectory
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if '.' in data:
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term not in data_dir and '.' in data: matches.append(data_dir)
return matches
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineAllLists(files_to_merge,original_filename,includeColumns=False):
headers =[]; all_keys={}; dataset_data={}; files=[]; unique_filenames=[]
count=0
for filename in files_to_merge:
duplicates=[]
count+=1
fn=filepath(filename); x=0; combined_data ={}
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
### If two files with the same name being merged
if file in unique_filenames:
file += str(count)
unique_filenames.append(file)
print file
files.append(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
if includeColumns==False:
for i in t:
headers.append(i+'.'+file)
#headers.append(i)
else:
headers.append(t[includeColumns]+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
if includeColumns==False:
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t; sys.exit()
else:
values = [t[includeColumns]]
#key = string.replace(key,' ','')
if len(key)>0 and key != ' ' and key not in combined_data: ### When the same key is present in the same dataset more than once
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
if permform_all_pairwise == 'yes':
try: combined_data[key].append(values); duplicates.append(key)
except Exception: combined_data[key] = [values]
else:
combined_data[key] = values
#print duplicates
dataset_data[filename] = combined_data
for i in dataset_data:
print len(dataset_data[i]), i
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key][0]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError:
values = null_values
if permform_all_pairwise == 'yes':
values = [null_values]
if permform_all_pairwise == 'yes':
try:
val_list = combined_file_data[key]
val_list.append(values)
combined_file_data[key] = val_list
except KeyError: combined_file_data[key] = [values]
else:
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
if permform_all_pairwise == 'yes':
results = getAllListCombinations(combined_file_data[key])
for result in results:
merged=[]
for i in result: merged+=i
values = string.join([key]+merged,'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
else:
try:
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
except Exception: print combined_file_data[key];sys.exit()
data.close()
print "exported",len(dataset_data),"to",export_file
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def getAllListCombinationsLong(a):
    """ e.g. a = (['a1','a2','a3'],['b1','b2','b3'],['c1','c2','c3']) """
    list_len_db={}
    for x in a:
        list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r+=i
else:
#http://code.activestate.com/recipes/496807-list-of-all-combination-from-multiple-lists/
r=[[]]
for x in a:
t = []
for y in x:
for i in r:
t.append(i+[y])
r = t
return r
def combineUniqueAllLists(files_to_merge,original_filename):
headers =[]; all_keys={}; dataset_data={}; files=[]
for filename in files_to_merge:
print filename
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
for i in t:
headers.append(i+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t; sys.exit()
#key = string.replace(key,' ','')
combined_data[key] = values
if len(key)>0 and key != ' ':
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
dataset_data[filename] = combined_data
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError: values = null_values
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
data.close()
print "exported",len(dataset_data),"to",export_file
def getAllListCombinations(a):
#http://www.saltycrane.com/blog/2011/11/find-all-combinations-set-lists-itertoolsproduct/
""" Nice code to get all combinations of lists like in the above example, where each element from each list is represented only once """
list_len_db={}
for x in a:
list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r.append(i[0])
return [r]
else:
return list(itertools.product(*a))
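### Hedged illustration (not part of the original script): with
### permform_all_pairwise enabled, each key maps to one list of value-lists per
### file, and getAllListCombinations expands them into every pairwise row via
### itertools.product.
def _demo_getAllListCombinations():
    a = [[['1','2']], [['3','4'],['5','6']]] ### values from file1 x file2
    print getAllListCombinations(a)
    ### expected: [(['1', '2'], ['3', '4']), (['1', '2'], ['5', '6'])]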
def joinFiles(files_to_merge,CombineType,unique_join,outputDir):
""" Join multiple files into a single output file """
global combine_type
global permform_all_pairwise
global output_dir
output_dir = outputDir
combine_type = string.lower(CombineType)
permform_all_pairwise = 'yes'
permform_all_pairwise = 'no' ### Uses only one reference gene ID
print 'combine type:',combine_type
print 'join type:', unique_join
#g = GrabFiles(); g.setdirectory(import_dir)
#files_to_merge = g.searchdirectory('xyz') ### 'xyz' serves as an exclusion term
if unique_join:
combineUniqueAllLists(files_to_merge,'')
else:
combineAllLists(files_to_merge,'')
return output_dir+'/MergedFiles.txt'
if __name__ == '__main__':
dirfile = unique
includeColumns=-2
includeColumns = False
output_dir = filepath('output')
combine_type = 'union'
permform_all_pairwise = 'yes'
print "Analysis Mode:"
print "1) Batch Analysis"
print "2) Single Output"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": batch_mode = 'yes'
elif inp == "2": batch_mode = 'no'
print "Combine Lists Using:"
print "1) Grab Union"
print "2) Grab Intersection"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": combine_type = 'union'
elif inp == "2": combine_type = 'intersection'
if batch_mode == 'yes': import_dir = '/batch/general_input'
else: import_dir = '/input'
g = GrabFiles(); g.setdirectory(import_dir)
files_to_merge = g.searchdirectory('xyz') ### 'xyz' serves as an exclusion term
if batch_mode == 'yes':
second_import_dir = '/batch/primary_input'
g = GrabFiles(); g.setdirectory(second_import_dir)
files_to_merge2 = g.searchdirectory('xyz') ### 'xyz' serves as an exclusion term
for file in files_to_merge2:
temp_files_to_merge = customLSDeepCopy(files_to_merge)
original_filename = string.split(file,'/'); original_filename = original_filename[-1]
temp_files_to_merge.append(file)
if '.' in file:
combineAllLists(temp_files_to_merge,original_filename)
else:
combineAllLists(files_to_merge,'',includeColumns=includeColumns)
print "Finished combining lists. Select return/enter to exit"; inp = sys.stdin.readline()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/mergeFilesUpdated.py
|
mergeFilesUpdated.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import itertools
dirfile = unique
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def returnDirectories(sub_dir):
dir_list = unique.returnDirectories(sub_dir)
return dir_list
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is the instance; self.data holds the directory assigned via setdirectory
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if '.' in data:
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term not in data_dir and '.' in data: matches.append(data_dir)
return matches
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineAllLists(files_to_merge,original_filename,includeColumns=False):
headers =[]; files=[]
import collections
all_keys=collections.OrderedDict()
dataset_data=collections.OrderedDict()
for filename in files_to_merge:
print filename
duplicates=[]
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
if includeColumns==False:
for i in t:
headers.append(i+'.'+file)
#headers.append(i)
else:
headers.append(t[includeColumns]+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
if includeColumns==False:
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t; sys.exit()
else:
values = [t[includeColumns]]
#key = string.replace(key,' ','')
if len(key)>0 and key != ' ' and key not in combined_data: ### When the same key is present in the same dataset more than once
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
if permform_all_pairwise == 'yes':
try: combined_data[key].append(values); duplicates.append(key)
except Exception: combined_data[key] = [values]
else:
combined_data[key] = values
#print duplicates
dataset_data[filename] = combined_data
for i in dataset_data:
print len(dataset_data[i]), i
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = collections.OrderedDict()
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key][0]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError:
values = null_values
if permform_all_pairwise == 'yes':
values = [null_values]
if permform_all_pairwise == 'yes':
try:
val_list = combined_file_data[key]
val_list.append(values)
combined_file_data[key] = val_list
except KeyError: combined_file_data[key] = [values]
else:
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
if permform_all_pairwise == 'yes':
results = getAllListCombinations(combined_file_data[key])
for result in results:
merged=[]
for i in result: merged+=i
values = string.join([key]+merged,'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
else:
try:
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
except Exception: print combined_file_data[key];sys.exit()
data.close()
print "exported",len(dataset_data),"to",export_file
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def getAllListCombinationsLong(a):
    """ e.g. a = (['a1','a2','a3'],['b1','b2','b3'],['c1','c2','c3']) """
    list_len_db={}
    for x in a:
        list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r+=i
else:
#http://code.activestate.com/recipes/496807-list-of-all-combination-from-multiple-lists/
r=[[]]
for x in a:
t = []
for y in x:
for i in r:
t.append(i+[y])
r = t
return r
def combineUniqueAllLists(files_to_merge,original_filename):
headers =[]; all_keys={}; dataset_data={}; files=[]
for filename in files_to_merge:
print filename
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
for i in t:
headers.append(i+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t; sys.exit()
#key = string.replace(key,' ','')
combined_data[key] = values
if len(key)>0 and key != ' ':
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
dataset_data[filename] = combined_data
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError: values = null_values
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
data.close()
print "exported",len(dataset_data),"to",export_file
def getAllListCombinations(a):
#http://www.saltycrane.com/blog/2011/11/find-all-combinations-set-lists-itertoolsproduct/
""" Nice code to get all combinations of lists like in the above example, where each element from each list is represented only once """
list_len_db={}
for x in a:
list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r.append(i[0])
return [r]
else:
return list(itertools.product(*a))
def joinFiles(files_to_merge,CombineType,unique_join,outputDir):
""" Join multiple files into a single output file """
global combine_type
global permform_all_pairwise
global output_dir
output_dir = outputDir
combine_type = string.lower(CombineType)
permform_all_pairwise = 'yes'
print 'combine type:',combine_type
print 'join type:', unique_join
#g = GrabFiles(); g.setdirectory(import_dir)
#files_to_merge = g.searchdirectory('xyz') ### 'xyz' serves as an exclusion term
if unique_join:
combineUniqueAllLists(files_to_merge,'')
else:
combineAllLists(files_to_merge,'')
return output_dir+'/MergedFiles.txt'
if __name__ == '__main__':
dirfile = unique
includeColumns=-2
includeColumns = False
output_dir = filepath('output')
combine_type = 'union'
permform_all_pairwise = 'yes'
print "Analysis Mode:"
print "1) Batch Analysis"
print "2) Single Output"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": batch_mode = 'yes'
elif inp == "2": batch_mode = 'no'
print "Combine Lists Using:"
print "1) Grab Union"
print "2) Grab Intersection"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": combine_type = 'union'
elif inp == "2": combine_type = 'intersection'
if batch_mode == 'yes': import_dir = '/batch/general_input'
else: import_dir = '/input'
g = GrabFiles(); g.setdirectory(import_dir)
files_to_merge = g.searchdirectory('xyz') ### 'xyz' serves as an exclusion term
if batch_mode == 'yes':
second_import_dir = '/batch/primary_input'
g = GrabFiles(); g.setdirectory(second_import_dir)
files_to_merge2 = g.searchdirectory('xyz') ### 'xyz' serves as an exclusion term
for file in files_to_merge2:
temp_files_to_merge = customLSDeepCopy(files_to_merge)
original_filename = string.split(file,'/'); original_filename = original_filename[-1]
temp_files_to_merge.append(file)
if '.' in file:
combineAllLists(temp_files_to_merge,original_filename)
else:
combineAllLists(files_to_merge,'',includeColumns=includeColumns)
print "Finished combining lists. Select return/enter to exit"; inp = sys.stdin.readline()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/mergeFiles.py
|
mergeFiles.py
|
import os, sys, string
from scipy import sparse, io
import numpy
import math
import time
""" Converts a tab-delimited text file to a sparse matrix """
class SparseMatrix:
def __init__(self,barcodes,features,data_matrix):
self.barcodes = barcodes
self.features = features
self.data_matrix = data_matrix
def Barcodes(self): return self.barcodes
def Features(self): return self.features
def Matrix(self): return self.data_matrix
def import_filter_genes(fn):
gene_filter = []
for line in open(fn,'rU').xreadlines():
if '\t' in line:
gene = string.split(line.rstrip(),'\t')[0]
else:
gene = string.split(line.rstrip(),',')[0]
gene_filter.append(gene)
return gene_filter
def covert_table_to_matrix(fn,delimiter='\t',gene_filter=None,Export=True):
header=True
skip=False
start_time = time.time()
for line in open(fn,'rU').xreadlines():
if header:
delimiter = ',' # CSV file
start = 1
if 'row_clusters' in line:
start=2 # An extra column and row are present from the ICGS file
skip=True
if '\t' in line:
delimiter = '\t' # TSV file
t = string.split(line.rstrip(),delimiter)[start:]
""" Optionally write out the matrix """
if Export:
export_directory = os.path.abspath(os.path.join(fn, os.pardir))+'/sparse/'
try: os.mkdir(export_directory)
except: pass
barcodes_export = open(export_directory+'barcodes.tsv', 'w')
features_export = open(export_directory+'features.tsv', 'w')
matrix_export = open(export_directory+'matrix.mtx', 'w')
barcodes = t
barcodes_export.write(string.join(t,'\n'))
barcodes_export.close()
genes=[]
data_array=[]
header=False
elif skip:
skip=False # Ignore the second row in the file that has cluster info
else:
values = string.split(line.rstrip(),'\t')
gene = values[0]
if gene_filter!=None:
""" Exclude the gene from the large input matrix if not in the filter list """
if gene not in gene_filter:
continue
if ' ' in gene:
gene = string.split(gene,' ')[0]
if ':' in gene:
genes.append((gene.rstrip().split(':'))[1])
else:
genes.append(gene)
""" If the data is a float, increment by 0.5 to round up """
values = map(float,values[start:])
def convert(x):
if x==0:
return 0
else:
return int(math.pow(2,x)-1)
values = map(lambda x: convert(x),values)
data_array.append(values)
data_array = sparse.csr_matrix(numpy.array(data_array))
end_time = time.time()
print 'Sparse matrix conversion in',end_time-start_time,'seconds.'
if Export:
features_export.write(string.join(genes,'\n'))
features_export.close()
io.mmwrite(matrix_export, data_array)
else:
sm = SparseMatrix(barcodes,genes,data_array)
return sm
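### Hedged illustration (not part of the original script): the convert step
### above assumes the input table holds log2(counts+1) values and inverts the
### transform, so zeros stay zero and a stored value of 3.0 maps back to
### int(2**3.0 - 1) = 7 counts.
def _demo_convert(x):
    import math
    if x==0:
        return 0
    return int(math.pow(2,x)-1)
### e.g. _demo_convert(0) -> 0; _demo_convert(3.0) -> 7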
if __name__ == '__main__':
import getopt
gene_filter = None
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates an insufficient number of command-line arguments
sys.exit()
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','f='])
for opt, arg in options:
if opt == '--i': fn=arg
elif opt == '--f': gene_filter=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if gene_filter != None:
gene_filter = import_filter_genes(gene_filter)
covert_table_to_matrix(fn,gene_filter=gene_filter)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/TableToMatrix.py
|
TableToMatrix.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
def importFPKMFile(input_file):
added_key={}
firstLine = True
for line in open(input_file,'rU').xreadlines():
data = line.rstrip('\n')
t = string.split(data,'\t')
if firstLine:
firstLine = False
else:
try: geneID= t[0]; symbol=t[1]; position=t[2]; fpkm = t[5]
except Exception: print t;sys.exit()
try: fpkm_db[geneID].append(fpkm)
except Exception: fpkm_db[geneID] = [fpkm]
added_key[geneID]=[]
for i in fpkm_db:
if i not in added_key:
fpkm_db[i].append('0.00')
def importCufflinksFPKMFileEXCEL(filename):
from xlrd import open_workbook
print filename
wb = open_workbook(filename)
    rows=[]
    for worksheet in wb.sheets():
        print worksheet.name
        for row in range(worksheet.nrows):
            values = []
            for col in range(worksheet.ncols):
                try: values.append(str(worksheet.cell(row,col).value))
                except Exception: pass
            rows.append(values)
    return rows
def getFiles(sub_dir,directories=True):
dir_list = os.listdir(sub_dir); dir_list2 = []
for entry in dir_list:
if directories:
if '.' not in entry: dir_list2.append(entry)
else:
if '.' in entry: dir_list2.append(entry)
return dir_list2
def combineCufflinks(root_dir):
export_object = open(root_dir+'/RSEM.txt','w')
global fpkm_db
global headers
fpkm_db={}; headers=['GeneID']
files = getFiles(root_dir,False)
for file in files:
filename = root_dir+'/'+file
if ('.genes.results' in file and '.gz' not in file) or ('.txt' in file and '.gz' not in file and 'RSEM.txt' not in file):
importFPKMFile(filename)
headers.append(file)
if '.xls' in file:
importCufflinksFPKMFileEXCEL(filename)
headers.append(file)
for i in fpkm_db:
fpkm_db[i]=[]
for file in files:
filename = root_dir+'/'+file
if ('.genes.results' in file and '.gz' not in file) or ('.txt' in file and '.gz' not in file and 'RSEM.txt' not in file):
importFPKMFile(filename)
headers = string.join(headers,'\t')+'\n'
export_object.write(headers)
for geneID in fpkm_db:
values = map(str,fpkm_db[geneID])
values = string.join([geneID]+values,'\t')+'\n'
export_object.write(values)
export_object.close()
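### Hedged illustration (not part of the original script) of the padding rule
### in importFPKMFile: after each sample file is parsed, any gene already in
### the global fpkm_db that was absent from that file receives a '0.00'
### placeholder, keeping every row aligned with one value per sample column.
def _demo_fpkm_padding():
    fpkm_db = {'geneA':['1.5'], 'geneB':['2.0']} ### state after sample 1
    added_key = {'geneA':[]} ### only geneA appeared in sample 2
    fpkm_db['geneA'].append('3.1')
    for i in fpkm_db:
        if i not in added_key:
            fpkm_db[i].append('0.00')
    print fpkm_db ### {'geneA': ['1.5', '3.1'], 'geneB': ['2.0', '0.00']}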
def gunzipfiles(root_dir):
import gzip
import shutil
folders = getFiles(root_dir,True)
for folder in folders:
l1 = root_dir+'/'+folder
files = getFiles(l1,False)
for file in files:
filename = l1+'/'+file
if 'genes.fpkm_tracking' in filename:
content = gzip.GzipFile(filename, 'rb')
decompressed_filepath = string.replace(filename,'.gz','')
data = open(decompressed_filepath,'wb')
shutil.copyfileobj(content,data)
data.close() ### ensure the decompressed file is flushed to disk
content.close()
if __name__ == '__main__':
#gunzipfiles('/Users/saljh8/Downloads/6b_CUFFLINKS_output/');sys.exit()
import getopt
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','f='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--i': input_file=arg
if '/' in input_file: delim = '/'
else: delim = '\\'
combineCufflinks(input_file);sys.exit()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/combineRSEM.py
|
combineRSEM.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import unique
import copy
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def identifyPutativeSpliceEvents(exon_db,constituitive_probeset_db,array_id_db,agglomerate_inclusion_probesets,onlyAnalyzeJunctions):
exon_dbase = {}; probeset_comparison_db = {}; x = 0; y = 0
### Grab all probesets where we can identify a potential exon inclusion/exclusion event
if len(array_id_db) == 0: array_id_db = exon_db ### Used when exporting all comparative junction data
for probeset in array_id_db:
if probeset in exon_db:
affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
exons = exon_db[probeset].ExonID() #get rid of last pipe
if probeset not in constituitive_probeset_db:
#thus, there is a 'gene' probeset for that gene, but we don't want to look at the gene probesets
if '|' not in exons: #get rid of any block exons or ambiguities
try: x += 1; probeset_comparison_db[affygene].append(exons)
except KeyError: x += 1; probeset_comparison_db[affygene] = [exons]
exon_dbase[affygene,exons] = probeset
print "Number of putative probeset comparisons:",x
probe_level_db = {}
for affygene in probeset_comparison_db:
for exon_probeset1 in probeset_comparison_db[affygene]:
for exon_probeset2 in probeset_comparison_db[affygene]:
if exon_probeset1 != exon_probeset2:
if '-' in exon_probeset1: #get both pair-wise possibilities with this, to grab junctions
e1a,e1b = string.split(exon_probeset1,'-')
e1 = e1a,e1b
try:
e2a,e2b = string.split(exon_probeset2,'-')
e2 = e2a,e2b
except ValueError: e2 = exon_probeset2
try: probe_level_db[affygene,e1].append(e2)
except KeyError: probe_level_db[affygene,e1] = [e2]
else: ### Required when exon_probeset1 is a single exon rather than a junction
if '-' in exon_probeset2:
e2a,e2b = string.split(exon_probeset2,'-')
e2 = e2a,e2b
e1 = exon_probeset1
try: probe_level_db[affygene,e2].append(e1)
except KeyError: probe_level_db[affygene,e2] = [e1]
#print "Looking for exon events defined by probeset exon associations"
alt_junction_db,critical_exon_db = independently_rank_analyze_junction_sets(probe_level_db,onlyAnalyzeJunctions)
#print "Associations Built\n"
### Rearrange alt_junction_db and agglomerate data for inclusion probesets
exon_inclusion_db={}; exon_inclusion_event_db={}; alt_junction_db_collapsed={}
if agglomerate_inclusion_probesets == 'yes':
for affygene in alt_junction_db:
alt_junction_db[affygene].sort() ### Should be no need to sort later if we do this
for event in alt_junction_db[affygene]:
### event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
event1 = event[0][0]; exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]; probeset2 = exon_dbase[affygene,exon_set2]
if event1 == 'ei':
###First generate the original fold values for export summary, then the adjusted
try: exon_inclusion_db[probeset2].append(probeset1)
except KeyError: exon_inclusion_db[probeset2] = [probeset1]
try: exon_inclusion_event_db[(affygene, probeset2, event[1])].append(event)
except KeyError: exon_inclusion_event_db[(affygene, probeset2, event[1])] = [event]
else: ### Store all the missing mutual exclusive splicing events
try: alt_junction_db_collapsed[affygene].append(event)
except KeyError: alt_junction_db_collapsed[affygene] = [event]
###Create a new alt_junction_db with merged inclusion events
for key in exon_inclusion_event_db:
affygene = key[0]; excl_probeset=key[1]; excl_event = key[2]
###Collect critical exon information from each inclusion exon-set to agglomerate and delete old entries
new_critical_exon_list=[]; incl_exon_sets=[]
for event in exon_inclusion_event_db[key]:
incl_exon_set = event[0][1]; incl_exon_sets.append(incl_exon_set) ### Don't sort since this will throw off probeset relationships: incl_exon_sets.sort()
if len(exon_inclusion_event_db[key])>1: ###If the original list of events > 1
critical_exon_list = critical_exon_db[affygene,tuple(event)][1]
for exon in critical_exon_list: new_critical_exon_list.append(exon)
#del critical_exon_db[affygene,tuple(event)]
new_critical_exon_list = unique.unique(new_critical_exon_list); new_critical_exon_list.sort()
new_critical_exon_list = [1,new_critical_exon_list]
incl_exon_sets_str = string.join(incl_exon_sets,'|') ### New inclusion exon group
event = [('ei',incl_exon_sets_str),excl_event] ### Store new inclusion exon group
try: alt_junction_db_collapsed[affygene].append(event)
except KeyError: alt_junction_db_collapsed[affygene] = [event]
###Replace exon_dbase entries with new combined probeset IDs
incl_probesets = exon_inclusion_db[excl_probeset]
incl_probesets_str = string.join(incl_probesets,'|')
if len(incl_exon_sets)>1: ###Often there will be only a single inclusion probeset
"""for exons in incl_exon_sets:
key = affygene,exons
try: del exon_dbase[key] ###delete individual inclusion exons and replace with a single inclusion agglomerate
except KeyError: continue ###Can occur more than once, if an exon participates in more than one splicing event
"""
exon_dbase[affygene,incl_exon_sets_str] = incl_probesets_str
critical_exon_db[affygene,tuple(event)] = new_critical_exon_list
###Create a new probeset entry in exon_db for the agglomerated probesets
new_block_exon_ids=[] #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure
for probeset in incl_probesets:
edat = exon_db[probeset]; ensembl = edat.ExternalGeneID(); block_exon_ids = edat.SecondaryExonID(); block_structure = edat.GeneStructure()
new_block_exon_ids.append(block_exon_ids)
new_block_exon_ids = string.join(new_block_exon_ids,'')
edat = exon_db[incl_probesets[0]]; edat1 = edat; edat1.setDisplayExonID(incl_exon_sets_str) #; edat1.setExonID(edat.ExonID()) ### Use the first inclusion probeset instance for storing all instance data
edat1.setSecondaryExonID(new_block_exon_ids); edat1.setProbeset(incl_probesets[0])
exon_db[incl_probesets_str] = edat1
print "Length of original splice event database:",len(alt_junction_db)
print "Length of agglomerated splice event database:",len(alt_junction_db_collapsed)
alt_junction_db = alt_junction_db_collapsed ### Replace with agglomerated database
### End rearrangement
return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def independently_rank_analyze_junction_sets(probe_level_db,onlyAnalyzeJunctions):
### The below code is used to identify sets of junctions and junction and exon sets anti-correlated with each other
### independently storing the critical exons involved
#probe_level_db[affygene,exons1].append(exons2)
x = 0
critical_exon_db = {}
alt_junction_db = {}
probe_level_db = eliminate_redundant_dict_values(probe_level_db)
for key in probe_level_db:
critical_exon_list = []
affygene = key[0]
exon_pair1 = key[1]
e1a = int(exon_pair1[0][1:])
e1b = int(exon_pair1[1][1:])
for exon_pair2 in probe_level_db[key]: #exon_pair2 could be a single exon
s = 0 ### reset the single-exon flag for each pairwise comparison
if exon_pair2[0] == 'E': # thus, exon_pair2 is actually a single exon
e2 = int(exon_pair2[1:])
s=1
else:
e2a = int(exon_pair2[0][1:])
e2b = int(exon_pair2[1][1:])
if s==0:
e1_pair = e1a,e1b
e2_pair = e2a,e2b
if s==1: # thus, exon_pair2 is actually a single exon
e1_pair = e1a,e1b
if e1a < e2 and e1b > e2 and onlyAnalyzeJunctions == 'no': # e.g. E3-E5 vs. E4
e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2x = 'ei','E'+str(e2)
critical_exons = [e1,e2x]
critical_exons.sort(); critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2)]] ###The 1 indicates that the exon can be called up or down, since it is an ei or ex event vs. mx
### Note: everything except for the last one should have two instances added to the database
elif (e1b == e2b and e1a > e2a): # e.g. E2-E3 vs. E1-E3
e1 = 'ei','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ex','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exons.sort();critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e1a)]]
#print affygene, exon_pair1,e1a,e1b,'----',exon_pair2,e2a,e2b
elif (e1b == e2b and e1a < e2a): # e.g. E1-E3 vs. E2-E3
e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ei','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exons.sort();critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2a)]]
elif (e1a == e2a and e1b < e2b): # e.g. E2-E3 vs. E2-E4
e1 = 'ei','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ex','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exons.sort();critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e1b)]]
elif (e1a == e2a and e1b > e2b): # e.g. E2-E4 vs. E2-E3
e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ei','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exons.sort();critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2b)]]
elif (e1a < e2a and e1b > e2a) and (e1a < e2b and e1b > e2b): # e.g. E2-E6 vs. E3-E5
e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ei','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exons.sort();critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2a),'E'+str(e2b)]]
elif (e1a > e2a and e1b < e2a) and (e1a > e2b and e1b < e2b): # e.g. E3-E5 vs. E2-E6
e1 = 'ei','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ex','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exons.sort();critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e1a),'E'+str(e1b)]]
elif (e1a < e2a and e1b > e2a): # e.g. E2-E6 vs. E3-E8
e1 = 'mx','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'mx','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [2,['E'+str(e1b),'E'+str(e2a)]]
elif (e1a < e2b and e1b > e2b): # e.g. E2-E6 vs. E1-E3
e1 = 'mx','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'mx','E'+str(e2a)+'-'+'E'+str(e2b)
critical_exons = [e1,e2]
critical_exon_list.append(critical_exons)
critical_exon_db[affygene,tuple(critical_exons)] = [2,['E'+str(e1a),'E'+str(e2b)]]
if len(critical_exon_list)>0:
for entry in critical_exon_list:
try:
alt_junction_db[affygene].append(entry)
except KeyError:
alt_junction_db[affygene] = [entry]
alt_junction_db = eliminate_redundant_dict_values(alt_junction_db)
return alt_junction_db, critical_exon_db
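"""Worked example of the junction-pair rules above (identifiers hypothetical):
for affygene 'G1', comparing exon_pair1 = ('E2','E3') with exon_pair2 = ('E2','E4')
gives e1a=2, e1b=3, e2a=2, e2b=4; the e1a == e2a and e1b < e2b branch fires, storing
the event [('ei','E2-E3'),('ex','E2-E4')] with critical exon ['E3']. Comparing
('E3','E5') with the single exon 'E4' (onlyAnalyzeJunctions == 'no') instead stores
[('ei','E4'),('ex','E3-E5')] with critical exon ['E4']."""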
def exportJunctionComparisons(alt_junction_db,critical_exon_db,exon_dbase):
competitive_junction_export = 'AltDatabase\Mm\AltMouse\AltMouse_junction-comparisons.txt'
fn=filepath(competitive_junction_export)
data = open(fn,'w')
title = ['Affygene','probeset1','probeset2','critical-exons']; title = string.join(title,'\t')+'\n'; data.write(title)
for affygene in alt_junction_db:
alt_junction_db[affygene].sort() ### Should be no need to sort later if we do this
for event in alt_junction_db[affygene]:
### event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]; probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)];critical_exon_list = critical_exon_list[1]
critical_exon_list = string.join(critical_exon_list,'|')
export_data = string.join([affygene]+[probeset1,probeset2,critical_exon_list],'\t')+'\n'
data.write(export_data)
data.close()
def annotate_splice_event(exons1,exons2,block_structure):
#1(E13|E12)-2(E11)-3(E10)-4(E9|E8)-5(E7|E6|E5)-6(E4|E3|E2|E1)
splice_event = ''; evidence = 'clear'
block_structure = string.replace(block_structure,')','|') ### assign the result so every block ends in '|' (matches the membership checks below)
block_list = string.split(block_structure,'-')
#[1(E13|E12|,2(E11|,3(E10|,4(E9|E8|,5(E7|E6|E5|,6(E4|E3|E2|E1|]
###Perform a membership query
try: exon1a,exon1b = string.split(exons1,'-') ###***
except ValueError: exon1a = exons1; exon1b = exons1; evidence = 'unclear'
try: exon2a,exon2b = string.split(exons2,'-')
except ValueError: exon2a = exons2; exon2b = exons2; evidence = 'unclear'
a = '';b = '';block_a='';block_b=''
if exon1a == exon2a: a = 'same'
if exon1b == exon2b: b = 'same'
ex1a_m = exon_membership(exon1a,block_list);ex2a_m = exon_membership(exon2a,block_list)
ex1b_m = exon_membership(exon1b,block_list);ex2b_m = exon_membership(exon2b,block_list)
#print ex1a_m, ex2a_m,ex1b_m,ex2b_m;dog
if ex1a_m == ex2a_m: block_a = 'same'
if ex1b_m == ex2b_m: block_b = 'same'
### Correct for strand differences
strand = "+"
if ex1a_m > ex1b_m: #strand therefore is negative
strand = "-"
if (abs(ex1a_m - ex2a_m) == 1) or (abs(ex1b_m - ex2b_m) == 1): alternative_exons = 'one'
else: alternative_exons = 'multiple'
if (ex1a_m == -1) or (ex2a_m == -1) or (ex1b_m == -1) or (ex2b_m == -1): splice_event = "retained_intron"
elif block_a == 'same' and b == 'same': splice_event = "alt5'"
elif block_b == 'same' and a == 'same': splice_event = "alt3'"
elif (block_a == 'same' and block_b != 'same'):
if a == 'same':
if alternative_exons == 'one': splice_event = "cassette-exon"
else: splice_event = "cassette-exons"
else:
if alternative_exons == 'one': splice_event = "alt5'-cassette-exon"
else: splice_event = "alt5'-cassette-exons"
elif (block_b == 'same' and block_a != 'same'):
if b == 'same':
if alternative_exons == 'one': splice_event = "cassette-exon"
else: splice_event = "cassette-exons"
else:
if alternative_exons == 'one': splice_event = "cassette-exon-alt3'"
else: splice_event = "cassette-exons-alt3'"
else:
if alternative_exons == 'one': splice_event = "alt5'-cassette-exon-alt3'"
else: splice_event = "alt5'-cassette-exons-alt3'"
if evidence == 'unclear':
###If the first probeset is a junction and the second is an exon, check whether the junction exons are two blocks away
if (abs(ex1a_m - ex2a_m) == 1) and (abs(ex1b_m - ex2b_m) == 1): splice_event = "cassette-exon"
elif (block_a == 'same' and block_b != 'same'):
if alternative_exons == 'one': splice_event = "alt5'"
else: splice_event = "alt5'-cassette-exons"
elif (block_a != 'same' and block_b == 'same'):
if alternative_exons == 'one': splice_event = "alt3'"
else: splice_event = "cassette-exons-alt3'"
else: splice_event = "unclear"
if strand == "-":
if splice_event == "alt5'": splice_event = "alt3'"
elif splice_event == "alt3'": splice_event = "alt5'"
elif splice_event == "alt5'-cassette-exon": splice_event = "cassette-exon-alt3'"
elif splice_event == "alt5'-cassette-exons": splice_event = "cassette-exons-alt3'"
elif splice_event == "cassette-exons-alt3'": splice_event = "alt5'-cassette-exons"
elif splice_event == "cassette-exon-alt3'": splice_event = "alt5'-cassette-exon"
#print splice_event
return splice_event
def exon_membership(exon,block_structure):
i=0; x = -1
exon_temp1 = exon+'|'; exon_temp2 = exon+')'
for exon_block in block_structure:
if exon_temp1 in exon_block or exon_temp2 in exon_block:
x = i
i += 1
return x
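### Example (block list hypothetical): with block_structure = ['1(E1|','2(E2|E3|'],
### exon_membership('E3',block_structure) returns 1, since 'E3|' occurs in the
### second block; an exon found in no block returns -1 (read above as a retained intron).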
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
if __name__ == '__main__':
exons1 = 'E9-E12'
exons2 = 'E11-E15'
block_structure = '1(E1)-2(E2)-3(E3|E4)-4(E5)-5(E6)-6(E7|E8|E9|E10|E11)-7(E12)-8(E13|E14)-9(E15)-10(E16|E17)-11(E18)-12(E19|E20)-13(E21|E22)-14(E23|E24)'
a = annotate_splice_event(exons1,exons2,block_structure)
print a
#alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db = identifyPutativeSpliceEvents(exon_db,constituitive_probeset_db,agglomerate_inclusion_probesets)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/ExonAnnotate_module.py
|
ExonAnnotate_module.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import csv
import scipy.io
import numpy
import time
import math
def normalizeDropSeqCounts(expFile,log=True):
start_time = time.time()
print 'log2 conversion equals',log
firstLine = True
mat_array=[]
gene_names = []
for line in open(expFile,'rU').xreadlines():
line = string.replace(line,'"','')
data = line.rstrip('\n')
t = string.split(data,'\t')
if firstLine:
barcodes = t[1:]
firstLine = False
else:
gene_names.append(t[0])
mat_array.append(map(float,t[1:]))
mat_array = numpy.array(mat_array)
### Write out CPM normalized data matrix
norm_path = expFile[:-4]+'_matrix_CPTT.txt' ### AltAnalyze designated ExpressionInput file (not counts)
print 'Normalizing gene counts to counts per ten thousand (CPTT)'
barcode_sum = numpy.sum(mat_array,axis=0) ### Get the sum counts for all barcodes, 0 is the y-axis in the matrix
start_time = time.time()
mat_array = mat_array.transpose()
vfunc = numpy.vectorize(calculateCPTT)
norm_mat_array=[]
i=0
for vector in mat_array:
norm_mat_array.append(vfunc(vector,barcode_sum[i]))
i+=1
mat_array = numpy.array(norm_mat_array)
mat_array = mat_array.transpose()
mat_array = numpy.ndarray.tolist(mat_array) ### convert to non-numpy list
i=0
updated_mat=[['UID']+barcodes]
for ls in mat_array:
updated_mat.append([gene_names[i]]+ls); i+=1
updated_mat = numpy.array(updated_mat)
del mat_array
numpy.savetxt(norm_path,updated_mat,fmt='%s',delimiter='\t')
print '... scaling completed in ',time.time()-start_time, 'seconds'
print 'CPTT written to file:',
print norm_path
def calculateCPTT(val,barcode_sum,log=True):
if val==0:
return '0'
else:
if log:
return math.log((10000.00*val/barcode_sum)+1.0,2) ### convert to log2 expression
else:
return 10000.00*val/barcode_sum
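### Worked example of the CPTT math above (values hypothetical): a gene with
### val = 5 counts in a barcode with barcode_sum = 20000 total counts scales to
### 10000.00*5/20000 = 2.5 counts per ten thousand, so calculateCPTT(5,20000)
### returns math.log(2.5+1.0,2) ~= 1.807 with log=True, or 2.5 with log=False.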
def normalizeDropSeqCountsMemoryEfficient(expFile,log=True):
""" A more memory efficient function than the above for scaling scRNA-Seq, line-by-line """
start_time = time.time()
print 'log2 conversion equals',log
firstLine = True
mat_array=[]
gene_names = []
for line in open(expFile,'rU').xreadlines():
line = string.replace(line,'"','')
data = line.rstrip('\n')
if '\t' in data:
t = string.split(data,'\t')
else:
t = string.split(data,',')
if firstLine:
barcodes = t[1:]
firstLine = False
count_sum_array=[0]*len(barcodes)
else:
gene_names.append(t[0])
values = map(float,t[1:])
count_sum_array = [sum(value) for value in zip(*[count_sum_array,values])]
### Import the expression dataset again and now scale
output_file = expFile[:-4]+'_CPTT-log2.txt'
export_object = open(output_file,'w')
firstLine=True
for line in open(expFile,'rU').xreadlines():
line = string.replace(line,'"','')
data = line.rstrip('\n')
if '\t' in data:
t = string.split(data,'\t')
else:
t = string.split(data,',')
if firstLine:
export_object.write(string.join(t,'\t')+'\n')
firstLine = False
else:
gene = t[0]
if 'ENS' in gene and '.' in gene:
gene = string.split(gene,'.')[0]
values = map(float,t[1:])
index=0
cptt_values = []
for barcode in barcodes:
barcode_sum = count_sum_array[index]
val = values[index]
cptt_val = calculateCPTT(val,barcode_sum)
cptt_values.append(cptt_val)
index+=1
values = string.join([gene]+map(lambda x: str(x)[:7], cptt_values),'\t')
export_object.write(values+'\n')
export_object.close()
print '... scaling completed in ',time.time()-start_time, 'seconds'
return output_file
def CSVformat(matrices_dir,cells_dir,emptydrops=True,target_organ=None,expressionCutoff=500):
""" Process an HCA DCP csv dense matrix format """
### Import and process the cell metadata
export_object = open(cells_dir[:-4]+'_'+target_organ+'-filtered.csv','w')
cell_ids_to_retain={}
firstLine=True
for line in open(cells_dir,'rU').xreadlines():
line = string.replace(line,'"','')
data = line.rstrip('\n')
t = string.split(data,',')
if firstLine:
index=0
for i in t:
if i == 'emptydrops_is_cell': edi = index
elif i == 'genes_detected': gdi = index
elif i == 'barcode': bi = index
elif i == 'derived_organ_label': oi = index
index+=1
export_object.write(line)
firstLine = False
else:
cell_id = t[0]
empty_drops = t[edi]
genes_detected = int(t[gdi])
barcode = t[bi]
organ = t[oi]
proceed=True
if emptydrops:
if empty_drops == 'f':
proceed = False
if target_organ != None:
if target_organ != organ:
proceed = False
if genes_detected<expressionCutoff:
proceed = False
if proceed:
cell_ids_to_retain[cell_id]=barcode
export_object.write(line)
print len(cell_ids_to_retain), 'IDs matching the user filters'
export_object.close()
if target_organ != None:
export_object = open(matrices_dir[:-4]+'_'+target_organ+'-filtered.txt','w')
else:
export_object = open(matrices_dir[:-4]+'-filtered.txt','w')
### Import and filter the flat expression data
firstLine=True
count=0
print 'Increments of 10,000 cells exported:',
for line in open(matrices_dir,'rU').xreadlines():
line = string.replace(line,'"','')
data = line.rstrip('\n')
t = string.split(data,',')
if firstLine:
export_object.write(string.join(t,'\t')+'\n')
firstLine = False
else:
cell_id = t[0]
if cell_id in cell_ids_to_retain:
export_object.write(string.join(t,'\t')+'\n')
if count == 10000:
count = 0
print '*',
count+=1
export_object.close()
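### Hypothetical invocation (paths and organ label illustrative): keep liver cells
### that emptydrops called as real with at least 500 genes detected, then write the
### filtered metadata and tab-delimited expression matrix alongside the inputs.
#CSVformat('/data/hca/matrix.csv','/data/hca/cells.csv',emptydrops=True,target_organ='liver',expressionCutoff=500)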
if __name__ == '__main__':
import getopt
log=True
expressionCutoff = 500
organ = None
emptydrops = True
cells_dir = None
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Insufficient options provided";sys.exit()
#Filtering samples in a datasets
#python DropSeqProcessing.py --i dropseq.txt
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','log=','csv=','organ=','expressionCutoff=',
'emptydrops=','cells='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--i': matrices_dir=arg
elif opt == '--csv': matrices_dir=arg
elif opt == '--cells': cells_dir=arg
elif opt == '--organ': target_organ=arg
elif opt == '--expressionCutoff':
expressionCutoff=float(arg)
elif opt == '--emptydrops':
if 'f' in arg or 'F' in arg:
emptydrops=False
elif opt == '--log':
if string.lower(arg) == 'true' or string.lower(arg) == 'yes':
pass
else:
log = False
if cells_dir != None:
CSVformat(matrices_dir,cells_dir,emptydrops=emptydrops,target_organ=target_organ,expressionCutoff=expressionCutoff)
else:
normalizeDropSeqCountsMemoryEfficient(matrices_dir)
#normalizeDropSeqCounts(matrices_dir,log=log)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/CountsNormalize.py
|
CountsNormalize.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import export
import unique
import traceback
""" Intersecting Coordinate Files """
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def eCLIPimport(folder):
eCLIP_dataset_peaks={}
annotations=[]
files = unique.read_directory(folder)
for file in files:
if '.bed' in file:
peaks={}
dataset = file[:-4]
print dataset
key_db={}
fn = unique.filepath(folder+'/'+file)
eo = export.ExportFile(folder+'/3-prime-peaks/'+file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
chr = t[0]
start = int(t[1])
end = int(t[2])
strand = t[5]
gene = string.split(t[-3],'.')[0]
annotation = string.split(t[-3],';')[-1]
if 'three_prime_utrs' in annotation:
eo.write(line)
if annotation not in annotations:
annotations.append(annotation)
symbol = t[-2]
key = chr,start,strand
#"""
if gene in coding_db:
coding_type = coding_db[gene][-1]
if 'protein_coding' in coding_type:
coding_type = 'protein_coding'
##"""
if key in key_db:
gene_db = key_db[key]
if gene in gene_db:
gene_db[gene].append(annotation)
else:
gene_db[gene]=[annotation]
else:
gene_db={}
gene_db[gene]=[annotation]
key_db[key]=gene_db
for key in key_db:
ranking=[]
for gene in key_db[key]:
ranking.append((len(key_db[key][gene]),gene))
ranking.sort()
gene = ranking[-1][-1]
for annotation in key_db[key][gene]:
if annotation in peaks:
peaks[annotation]+=1
else:
peaks[annotation]=1
eCLIP_dataset_peaks[dataset]=peaks
eo.close()
annotations.sort()
eo = export.ExportFile(folder+'/summary-annotations/summary.txt')
header = string.join(['RBP']+map(str,annotations),'\t')+'\n'
eo.write(header)
for dataset in eCLIP_dataset_peaks:
annot=[]
peaks = eCLIP_dataset_peaks[dataset]
for annotation in annotations:
if annotation in peaks:
annot.append(peaks[annotation])
else:
annot.append(0)
annot = map(lambda x: (1.000*x/sum(annot)), annot)
values = string.join([dataset]+map(str,annot),'\t')+'\n'
eo.write(values)
eo.close()
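### Example of the per-peak gene ranking above (IDs hypothetical): for a peak with
### key_db[key] = {'ENSG01': ['three_prime_utrs','exons'], 'ENSG02': ['exons']},
### ranking sorts to [(1,'ENSG02'),(2,'ENSG01')] and the gene kept is 'ENSG01',
### whose two annotations are then tallied into the dataset's peaks counter.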
if __name__ == '__main__':
################ Comand-line arguments ################
import getopt
CLIP_dir = None
species = 'Hs'
""" Usage:
bedtools intersect -wb -a /Clip_merged_reproducible_ENCODE/K562/AARS-human.bed -b /annotations/combined/hg19_annotations-full.bed > /test.bed
"""
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print 'WARNING!!!! Too few commands supplied.'
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['species=','clip='])
#print sys.argv[1:]
for opt, arg in options:
if opt == '--species':
species = arg
elif opt == '--clip':
CLIP_dir = arg
import ExpressionBuilder
coding_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
dataset_peaks = eCLIPimport(CLIP_dir)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/peakAnnotation.py
|
peakAnnotation.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import time
import random
import math
import sqlite3
import export
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def filepath(filename):
try:
import unique ### local to AltAnalyze
fn = unique.filepath(filename)
except Exception:
### Should work fine when run as a standalone script (the unique module is specific to the AltAnalyze package)
dir=os.path.dirname(os.path.abspath(__file__)) ### directory of this script
try: dir_list = os.listdir(filename); fn = filename ### test to see if the path can be found (then it is the full path)
except Exception: fn=os.path.join(dir,filename)
return fn
##### SQLite Database Access ######
def createSchemaTextFile(species,platform,schema_text,DBname):
schema_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'_schema.sql')
export_data = export.ExportFile(schema_filename)
### We will need to augment the database with protein feature annotations for
export_data.write(schema_text)
export_data.close()
def populateSQLite(species,platform,DBname,schema_text=None):
global conn
""" Since we wish to work with only one gene at a time which can be associated with a lot of data
it would be more memory efficient transfer this data to a propper relational database for each query """
db_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'.db') ### store in user directory
schema_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'_schema.sql')
### Check to see if the database exists already and if not create it
db_is_new = not os.path.exists(db_filename)
with sqlite3.connect(db_filename) as conn:
if db_is_new:
createSchemaTextFile(species,platform,schema_text,DBname)
print 'Creating schema'
with open(schema_filename, 'rt') as f:
schema = f.read()
#print schema
conn.executescript(schema)
else:
print 'Database exists, assume schema does too.'
#sys.exit()
return conn ### User must now add data to the empty SQLite database
def connectToDB(species,platform,DBname):
db_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'.db') ### store in user directory
with sqlite3.connect(db_filename) as conn:
return conn
def retreiveDatabaseFields(conn,ids,query):
""" Retreive data from specific fields from the database """
cursor = conn.cursor()
#id = 'ENSG00000114127'
#query = "select id, name, description, chr, strand from genes where id = ?"
cursor.execute(query,ids) ### In this way, don't have to use %s and specify type
ls=[]
for row in cursor.fetchall():
#id, name, description, chr, strand = row
#print '%s %s %s %s %s' % (id, name, description, chr, strand)
ls.append(row)
return ls
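""" Hypothetical usage sketch for the helpers above (table and fields illustrative):
schema_text = 'create table genes (id text primary key, name text);'
conn = populateSQLite('Hs','RNASeq','gene_db',schema_text=schema_text)
conn.execute('insert into genes values (?,?)',('ENSG00000114127','XRN1')); conn.commit()
for row in retreiveDatabaseFields(conn,('ENSG00000114127',),
'select id, name from genes where id = ?'): print row
"""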
def bulkLoading():
import csv
import sqlite3
import sys
db_filename = 'todo.db'
data_filename = sys.argv[1]
SQL = """insert into task (details, priority, status, deadline, project)
values (:details, :priority, 'active', :deadline, :project)
"""
with open(data_filename, 'rt') as csv_file:
csv_reader = csv.DictReader(csv_file)
with sqlite3.connect(db_filename) as conn:
cursor = conn.cursor()
cursor.executemany(SQL, csv_reader)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/SQLInterface.py
|
SQLInterface.py
|
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
import copy,getopt
import time
import traceback
try: import export
except Exception: pass
try: import unique
except Exception: pass
import Bio; from Bio.Seq import Seq
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def parseFASTQFile(fn):
count=0
spacer='TGGT'
global_count=0
read2_viral_barcode={}
read1_cellular_barcode={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
global_count+=1
if count == 0:
read_id = string.split(data,' ')[0][1:]
count+=1
elif count == 1:
sequence = data
count+=1
else:
count+=1
if count == 4:
count = 0
if 'R2' in fn:
if spacer in sequence:
if sequence.index(spacer) == 14:
viral_barcode = sequence[:48]
read2_viral_barcode[read_id]=viral_barcode
else: ### Reverse complement
sequence = Seq(sequence)
sequence=str(sequence.reverse_complement())
if spacer in sequence:
if sequence.index(spacer) == 14:
viral_barcode = sequence[:48]
read2_viral_barcode[read_id]=viral_barcode
if 'R1' in fn:
if 'TTTTT' in sequence:
cell_barcode = sequence[:16]
read1_cellular_barcode[read_id]=cell_barcode
elif 'AAAAA' in sequence: ### Reverse complement
sequence = Seq(sequence)
cell_barcode=str(sequence.reverse_complement())[:16]
read1_cellular_barcode[read_id]=cell_barcode
if 'R2' in fn:
return read2_viral_barcode
else:
return read1_cellular_barcode
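### Minimal sketch of the R2 spacer test above (read sequence hypothetical):
### a read whose viral barcode stem occupies the first 14 bases places the
### 'TGGT' spacer at index 14, so the first 48 bases are kept as the barcode.
#seq = 'A'*14 + 'TGGT' + 'C'*30
#if 'TGGT' in seq and seq.index('TGGT') == 14:
# viral_barcode = seq[:48] ### 14 nt stem + 4 nt spacer + 30 nt remainder = 48 nt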
def outputPairs(fastq_dir,read1_cellular_barcode,read2_viral_barcode):
outdir = fastq_dir+'.viral_barcodes.txt'
o = open (outdir,"w")
unique_pairs={}
for uid in read2_viral_barcode:
if uid in read1_cellular_barcode:
cellular = read1_cellular_barcode[uid]
viral = read2_viral_barcode[uid]
if (viral,cellular) not in unique_pairs:
o.write(viral+'\t'+cellular+'\n')
unique_pairs[(viral,cellular)]=[]
o.close()
if __name__ == '__main__':
################ Comand-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a SAM file as input in the command-line"
print "Example: python BAMtoJunctionBED.py --i /Users/me/sample1.fastq"
sys.exit()
else:
Species = None
options, remainder = getopt.getopt(sys.argv[1:],'', ['i='])
for opt, arg in options:
if opt == '--i': fastq_dir=arg ### full path of a FASTQ file
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if 'R1' in fastq_dir:
r1 = fastq_dir
r2 = string.replace(fastq_dir,'R1','R2')
else:
r1 = string.replace(fastq_dir,'R2','R1')
r2= fastq_dir
read2_viral_barcode = parseFASTQFile(r2)
read1_cellular_barcode = parseFASTQFile(r1)
outputPairs(fastq_dir,read1_cellular_barcode,read2_viral_barcode)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/SAMtoBarcode.py
|
SAMtoBarcode.py
|
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This script can be run on its own to extract a single BAM file at a time or
indirectly by multiBAMtoBED.py to extract exon.bed files (Tophat format)
from many BAM files in a single directory at once. Requires an exon.bed reference
file for exon coordinates (genomic bins for which to sum unique read counts).
Excludes junction reads within each interval"""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
import copy
import time
import getopt
def findGeneVariants(species,symbols,bam_dir,variants=None):
global insertion_db
insertion_db={}
print symbols
print bam_dir
if len(symbols)>0:
### Search for genes not for coordinates
search_locations = geneCoordinates(species,symbols)
else:
### Search for coordinates and not genes
search_locations = variantCoordinates(variants)
### Discover the variants
variant_db = findVariants(bam_dir,search_locations)
variant_filtered_db={}
for var in variant_db:
#print var, variant_db[var]
if variant_db[var]>3:
#print var,variant_db[var]
variant_filtered_db[var] = variant_db[var]
### Quantify the variants versus background
pileupAnalysis(bam_dir,variant_filtered_db)
def variantCoordinates(variants):
search_locations=[]
contents = open(variants, "rU")
for line in contents:
line = line.rstrip()
chr,start,end,symbol = string.split(line,'\t')
if 'chr' not in chr: chr = 'chr'+chr
strand = 'NA'
search_locations.append([chr,strand,start,end,symbol])
return search_locations
def geneCoordinates(species,symbols):
genes=[]
from build_scripts import EnsemblImport
ensembl_annotation_db = EnsemblImport.reimportEnsemblAnnotations(species,symbolKey=True)
for symbol in symbols:
if symbol in ensembl_annotation_db:
ens_geneid = ensembl_annotation_db[symbol]
genes.append((ens_geneid,symbol))
else:
print symbol, 'not found'
### Get gene genomic locations
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
search_locations=[]
for (gene,symbol) in genes:
chr,strand,start,end = gene_location_db[gene]
#if symbol == 'SRSF10': chr = 'chr1'; strand = '-'; start = '24295573'; end = '24306953'
if len(chr)>6: print symbol, 'bad chromosomal reference:',chr
else:
search_locations.append([chr,strand,start,end,symbol])
return search_locations
def findVariants(bam_dir,search_locations,multi=False):
start_time = time.time()
bamfile = pysam.Samfile(bam_dir, "rb" )
output_bed_rows=0
#https://www.biostars.org/p/76119/
variant_db={}
reference_rows=0
o = open (string.replace(bam_dir,'.bam','__variant.txt'),"w")
for (chr,strand,start,stop,symbol) in search_locations: ### read each line one-at-a-time rather than loading all in memory
read_count=0
reference_rows+=1
stop=int(stop)+100 ### buffer for single variants
start=int(start)-100 ### buffer for single variants
for alignedread in bamfile.fetch(chr, int(start),int(stop)):
md = alignedread.opt('MD')
omd = md
codes = map(lambda x: x[0],alignedread.cigar)
cigarstring = alignedread.cigarstring
#print symbol,cigarstring
if 1 in codes and alignedread.pos:
### Thus an insertion is present
cigarstring = alignedread.cigarstring
chr = bamfile.getrname(alignedread.rname)
pos = alignedread.pos
def getInsertions(cigarList,X):
cummulative=0
coordinates=[]
for (code,seqlen) in cigarList:
if code == 0 or code == 3:
cummulative+=seqlen
if code == 1:
coordinates.append(X+cummulative)
return coordinates
coordinates = getInsertions(alignedread.cigar,pos)
"""
print pos
print coordinates
print alignedread.seq
print codes
print alignedread.cigar
print cigarstring
print md;sys.exit()
"""
for pos in coordinates:
try: variant_db[chr,pos,symbol]+=1
except Exception: variant_db[chr,pos,symbol] = 1
insertion_db[chr,pos]=[]
continue
try:
int(md) ### If an integer, no mismatches or deletions present
continue
except Exception:
#print chr, int(start),int(stop)
#print alignedread.get_reference_sequence()
#print alignedread.seq
md = string.replace(md,'C','A')
md = string.replace(md,'G','A')
md = string.replace(md,'T','A')
md = string.split(md,'A')
pos = alignedread.pos
chr = bamfile.getrname(alignedread.rname)
#if omd == '34^GA16': print md, pos
for i in md[:-1]:
try:
pos+=int(i)+1
except Exception:
if i == '':
pos+=+1
elif '^' in i: ### position is equal to the last position
pos+=int(string.split(i,'^')[0])+1
#pass
#if 'CGGATCC' in alignedread.seq: print string.split(alignedread.seq,'CGGATCC')[1],[pos]
try: variant_db[chr,pos,symbol]+=1
except Exception: variant_db[chr,pos,symbol] = 1
#codes = map(lambda x: x[0],alignedread.cigar)
output_bed_rows+=1
o.close()
bamfile.close()
if multi==False:
print time.time()-start_time, 'seconds to assign reads for %d entries from %d reference entries' % (output_bed_rows,reference_rows)
#print variant_db;sys.exit()
return variant_db
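"""Worked example of the MD-tag walk above (read hypothetical): for a read with
0-based alignedread.pos = 1000 and MD tag '50T25', masking C/G/T to 'A' and
splitting on 'A' gives ['50','25']; iterating md[:-1] advances pos by 50+1, so
the single mismatch is tallied at the 1-based coordinate 1051 in variant_db."""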
def pileupAnalysis(bam_dir,search_locations,multi=False):
start_time = time.time()
bamfile = pysam.Samfile(bam_dir, "rb" )
reference_rows=0
output_bed_rows=0
#https://www.biostars.org/p/76119/
variant_db={}
o = open (string.replace(bam_dir,'.bam','__variant.txt'),"w")
entries = ['chr','position','rare-allele frq','type','depth','gene','variant_info','alt_frq']
o.write(string.join(entries,'\t')+'\n')
#print 'Analyzing',len(search_locations),'variants'
for (chr,pos,symbol) in search_locations: ### read each line one-at-a-time rather than loading all in memory
pos = int(pos)
read_count=0
reference_rows+=1
nucleotide_frequency={}
for pileupcolumn in bamfile.pileup(chr,pos,pos+1):
# Skip columns outside desired range
#print pos, pileupcolumn.pos, pileupcolumn.cigarstring, pileupcolumn.alignment.pos
if pileupcolumn.pos == (pos-1):
for pileupread in pileupcolumn.pileups:
try: nt = pileupread.alignment.query_sequence[pileupread.query_position]
except Exception,e:
if 'D' in pileupread.alignment.cigarstring:
nt = 'del'
else:
nt = 'ins'
try: nucleotide_frequency[nt]+=1
except Exception: nucleotide_frequency[nt]=1
nt_freq_list=[]
nt_freq_list_tuple=[]
for nt in nucleotide_frequency:
nt_freq_list.append(nucleotide_frequency[nt])
nt_freq_list_tuple.append([nucleotide_frequency[nt],nt])
s = sum(nt_freq_list)
nt_freq_list.sort()
nt_freq_list_tuple.sort()
try:
frq = float(search_locations[chr,pos,symbol])/s ### rare-allele frequency: variant read count (from findVariants) over the pileup depth
except Exception: frq = '1.000000'; print symbol, pos, nucleotide_frequency, search_locations[chr,pos,symbol]
if (chr,pos) in insertion_db:
#print 'insertion', chr, pos
call = 'insertion'
### For insertions if the inserted base matches the reference base, incorrect freq will be reported
elif 'del' in nucleotide_frequency:
#frq = float(nt_freq_list[-2])/s
call = 'del'
else:
#frq = float(nt_freq_list[-2])/s
call = 'mismatch'
if len(nt_freq_list)>1 or call == 'insertion':
if frq>0.01:
frq = str(frq)[:4]
most_frequent_frq,most_frequent_nt = nt_freq_list_tuple[-1]
try:
second_most_frequent_frq,second_most_frequent_nt = nt_freq_list_tuple[-2]
alt_frq = str(float(second_most_frequent_frq)/most_frequent_frq)
except Exception:
second_most_frequent_frq = 'NA'; second_most_frequent_nt='NA'
alt_frq = 'NA'
variant_info = most_frequent_nt+'('+str(most_frequent_frq)+')|'+second_most_frequent_nt+'('+str(second_most_frequent_frq)+')'
entries = [chr,str(pos),str(frq),call,str(s),symbol,variant_info,alt_frq]
o.write(string.join(entries,'\t')+'\n')
output_bed_rows+=1
o.close()
bamfile.close()
if multi==False:
print time.time()-start_time, 'seconds to assign reads for %d entries from %d reference entries' % (output_bed_rows,reference_rows)
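### Example of the pileup frequency logic above (counts hypothetical): if a
### position shows nucleotide_frequency = {'A': 95, 'G': 5} and findVariants saw
### the variant 5 times, then depth s = 100 and the reported rare-allele
### frq = 5/100.0 = 0.05, with call 'mismatch' (no insertion and no 'del' key).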
if __name__ == "__main__":
#bam_dir = "H9.102.2.6.bam"
#reference_dir = 'H9.102.2.6__exon.bed'
################ Comand-line arguments ################
symbols=[]
variantFile = None
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a BAM file as input in the command-line"
print "Example: python BAMtoExonBED.py --i /Users/me/sample1.bam --r /Users/me/Hs_exon-cancer_hg19.bed"
sys.exit()
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species=','g=','v='])
for opt, arg in options:
if opt == '--i': bam_dir=arg ### A single BAM file location (full path)
elif opt == '--species': species=arg
elif opt == '--g': symbols.append(arg)
elif opt == '--v': variantFile = arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
findGeneVariants(species,symbols,bam_dir,variants=variantFile)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/import_scripts/BAMtoGeneVariants.py
|
BAMtoGeneVariants.py
|
import numpy as np
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
'''
from Anne Archibald in mailinglist:
http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
'''
def __init__(self, dataset, covariance):
self.covariance = covariance
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
class gaussian_kde_covfact(stats.gaussian_kde):
def __init__(self, dataset, covfact = 'scotts'):
self.covfact = covfact
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance_(self):
'''not used'''
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
def covariance_factor(self):
if self.covfact in ['sc', 'scotts']:
return self.scotts_factor()
if self.covfact in ['si', 'silverman']:
return self.silverman_factor()
elif self.covfact:
return float(self.covfact)
else:
raise ValueError, \
'covariance factor has to be scotts, silverman or a number'
def reset_covfact(self, covfact):
self.covfact = covfact
self.covariance_factor()
self._compute_covariance()
def plotkde(covfact):
gkde.reset_covfact(covfact)
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
plt.legend()
from numpy.testing import assert_array_almost_equal, \
assert_almost_equal, assert_
def test_kde_1d():
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
print xnmean, xnstd
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
print 'MSE', np.sum((kdepdf - normpdf)**2)
print 'maxabserror', np.max(np.abs(kdepdf - normpdf))
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
#assert_array_almost_equal(kdepdf, normpdf, decimal=2)
print gkde.integrate_gaussian(0.0, 1.0)
print gkde.integrate_box_1d(-np.inf, 0.0)
print gkde.integrate_box_1d(0.0, np.inf)
print gkde.integrate_box_1d(-np.inf, xnmean)
print gkde.integrate_box_1d(xnmean, np.inf)
assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
## assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),
## (kdepdf*normpdf).sum()*intervall, decimal=2)
if __name__ == '__main__':
# generate a sample
n_basesample = 1000
np.random.seed(8765678)
alpha = 0.6 #weight for (prob of) lower distribution
mlow, mhigh = (-3,3) #mean locations for gaussian mixture
xn = np.concatenate([mlow + np.random.randn(int(alpha * n_basesample)),
mhigh + np.random.randn(int((1-alpha) * n_basesample))]) ### sample counts must be integers
# get kde for original sample
#gkde = stats.gaussian_kde(xn)
gkde = gaussian_kde_covfact(xn, 0.1)
# evaluate the density function for the kde for some points
ind = np.linspace(-7,7,101)
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
gkde = gaussian_kde_covfact(xn, 'scotts')
kdepdf = gkde.evaluate(ind)
plt.figure()
# plot histgram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
#plt.show()
for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
plotkde(cv)
test_kde_1d()
np.random.seed(8765678)
n_basesample = 1000
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/kde_subclass.py
|
kde_subclass.py
|
import os
import sys
import time
import glob
import misopy
from misopy.settings import Settings
from misopy.settings import miso_path as miso_settings_path
import misopy.hypothesis_test as ht
import misopy.as_events as as_events
import misopy.cluster_utils as cluster_utils
import misopy.sam_utils as sam_utils
import misopy.miso_sampler as miso
import misopy.Gene as gene_utils
import misopy.gff_utils as gff_utils
import misopy.misc_utils as misc_utils
from misopy.parse_csv import *
from misopy.samples_utils import *
import numpy as np
np.seterr(all='ignore')
miso_path = os.path.dirname(os.path.abspath(__file__))
def greeting(parser=None):
print "MISO (Mixture of Isoforms model)"
print "Compare MISO samples to get differential isoform statistics."
print "Use --help argument to view options.\n"
print "Example usage:\n"
print "compare_miso --compare-samples sample1/ sample2/ results/"
if parser is not None:
parser.print_help()
def main():
from optparse import OptionParser
parser = OptionParser()
##
## Psi utilities
##
parser.add_option("--compare-samples", dest="samples_to_compare",
nargs=3, default=None,
help="Compute comparison statistics between the two " \
"given samples. Expects three directories: the first is " \
"sample1's MISO output, the second is sample2's MISO " \
"output, and the third is the directory where " \
"results of the sample comparison will be outputted.")
parser.add_option("--comparison-labels", dest="comparison_labels",
nargs=2, default=None,
help="Use these labels for the sample comparison "
"made by --compare-samples. "
"Takes two arguments: the label for sample 1 "
"and the label for sample 2, where sample 1 and "
"sample 2 correspond to the order of samples given "
"to --compare-samples.")
parser.add_option("--use-compressed", dest="use_compressed",
nargs=1, default=None,
help="Use compressed event IDs. Takes as input a "
"genes_to_filenames.shelve file produced by the "
"index_gff script.")
(options, args) = parser.parse_args()
if options.samples_to_compare is None:
greeting()
use_compressed = None
if options.use_compressed is not None:
use_compressed = \
os.path.abspath(os.path.expanduser(options.use_compressed))
if not os.path.exists(use_compressed):
print "Error: mapping filename from event IDs to compressed IDs %s " \
"is not found." %(use_compressed)
sys.exit(1)
else:
print "Compression being used."
if options.samples_to_compare is not None:
sample1_dirname = os.path.abspath(options.samples_to_compare[0])
sample2_dirname = os.path.abspath(options.samples_to_compare[1])
output_dirname = os.path.abspath(options.samples_to_compare[2])
if not os.path.isdir(output_dirname):
print "Making comparisons directory: %s" %(output_dirname)
misc_utils.make_dir(output_dirname)
ht.output_samples_comparison(sample1_dirname,
sample2_dirname,
output_dirname,
sample_labels=options.comparison_labels,
use_compressed=use_compressed)
if __name__ == '__main__':
main()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/compare_miso.py
|
compare_miso.py
|
import os
import sys
import time
import glob
import misopy
from misopy.settings import Settings
from misopy.settings import miso_path as miso_settings_path
import misopy.hypothesis_test as ht
import misopy.as_events as as_events
import misopy.cluster_utils as cluster_utils
import misopy.sam_utils as sam_utils
import misopy.miso_sampler as miso
import misopy.Gene as gene_utils
import misopy.gff_utils as gff_utils
import misopy.misc_utils as misc_utils
import misopy.samples_utils as samples_utils
from misopy.parse_csv import *
import numpy as np
np.seterr(all='ignore')
miso_path = os.path.dirname(os.path.abspath(__file__))
def greeting(parser=None):
print "MISO (Mixture of Isoforms model)"
print "Summarize MISO output to get Psi values and confidence intervals."
print "Use --help argument to view options.\n"
if parser is not None:
parser.print_help()
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--summarize-samples", dest="summarize_samples",
nargs=2, default=None,
help="Compute summary statistics of the given set "
"of samples. Expects a directory with MISO output "
"and a directory to output summary file to.")
parser.add_option("--summary-label", dest="summary_label",
nargs=1, default=None,
help="Label for MISO summary file. If not given, "
"uses basename of MISO output directory.")
parser.add_option("--use-compressed", dest="use_compressed",
nargs=1, default=None,
help="Use compressed event IDs. Takes as input a "
"genes_to_filenames.shelve file produced by the "
"index_gff script.")
(options, args) = parser.parse_args()
greeting()
use_compressed = None
if options.use_compressed is not None:
use_compressed = \
os.path.abspath(os.path.expanduser(options.use_compressed))
if not os.path.exists(use_compressed):
print "Error: mapping filename from event IDs to compressed IDs %s " \
"is not found." %(use_compressed)
sys.exit(1)
else:
print "Compression being used."
##
## Summarizing samples
##
if options.summarize_samples:
samples_dir = \
os.path.abspath(os.path.expanduser(options.summarize_samples[0]))
if options.summary_label != None:
samples_label = options.summary_label
print "Using summary label: %s" %(samples_label)
else:
samples_label = \
os.path.basename(os.path.expanduser(samples_dir))
assert(len(samples_label) >= 1)
summary_output_dir = \
os.path.abspath(os.path.join(os.path.expanduser(options.summarize_samples[1]),
'summary'))
if not os.path.isdir(summary_output_dir):
misc_utils.make_dir(summary_output_dir)
summary_filename = os.path.join(summary_output_dir,
'%s.miso_summary' %(samples_label))
samples_utils.summarize_sampler_results(samples_dir,
summary_filename,
use_compressed=use_compressed)
if __name__ == "__main__":
main()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/summarize_miso.py
|
summarize_miso.py
|
import os
import sys
import time
import zipfile
import sqlite3
import shutil
import fnmatch
import glob
import misopy
import misopy.misc_utils as utils
import misopy.miso_utils as miso_utils
import misopy.miso_db as miso_db
class MISOCompressor:
"""
Compressor/uncompressor of MISO output-containing directories.
The compressor:
(1) copies the original directory containing MISO directories
(2) creates MISO databases (.miso_db files)
from the *.miso-containing directories
(3) then zip's up the resulting directory
The compressor works on the copy and leaves the original
directory unmodified.
"""
def __init__(self):
self.input_dir = None
self.output_dir = None
# Extension for compressed directory that contains
# any sort of MISO output within it
self.comp_ext = ".misozip"
def compress(self, output_filename, miso_dirnames):
"""
Takes a set of MISO input directories and compresses them
into 'output_filename'. This involves making SQL databases
for all the MISO directories and then additionally compressing the
results as a zip file.
"""
if os.path.isfile(output_filename):
print "Error: %s already exists. Please delete to overwrite." \
%(output_filename)
output_dir = "%s%s" %(output_filename, miso_db.MISO_DB_EXT)
if os.path.isdir(output_dir):
print "Error: Intermediate compressed directory %s " \
"exists. Please delete to overwrite." %(output_dir)
sys.exit(1)
for miso_dirname in miso_dirnames:
print "Processing: %s" %(miso_dirname)
if not os.path.isdir(miso_dirname):
print "Error: %s not a directory." %(miso_dirname)
sys.exit(1)
if os.path.isfile(output_filename):
print "Output file %s already exists, aborting. " \
"Please delete the file if you want " \
"compression to run."
sys.exit(1)
self.miso_dirs_to_compress = []
print "Copying source directory tree.."
shutil.copytree(miso_dirname, output_dir,
ignore=self.collect_miso_dirs)
for dir_to_compress in self.miso_dirs_to_compress:
rel_path = os.path.relpath(dir_to_compress, miso_dirname)
comp_path = os.path.join(output_dir, rel_path)
# Remove the place holder directory
os.rmdir(comp_path)
comp_path = "%s%s" %(comp_path, miso_db.MISO_DB_EXT)
miso_db.miso_dir_to_db(dir_to_compress, comp_path)
# Zip directory using conventional zip
print "Zipping compressed directory with standard zip..."
t1 = time.time()
zipper(output_dir, output_filename)
print "Deleting intermediate directory: %s" %(output_dir)
shutil.rmtree(output_dir)
t2 = time.time()
print " - Standard zipping took %.2f minutes." \
%((t2 - t1)/60.)
print "To access the SQLite representation of raw MISO output "
print "(*.miso) files, simply unzip with the .miso_zip file "
print "with standard unzip utility:\n"
print " unzip %s" %(output_filename)
def uncompress(self, compressed_filename, output_dir):
"""
Takes a compressed MISO file 'compressed_filename' and
uncompresses it into 'output_dir'.
"""
if not os.path.isfile(compressed_filename):
print "Error: Cannot find %s, aborting." \
%(compressed_filename)
if not os.path.basename(compressed_filename).endswith(self.comp_ext):
print "Warning: %s does not end in %s. Are you sure it is " \
"a file compressed by miso_zip.py?" \
%(compressed_filename, self.comp_ext)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
print "Uncompressing %s into %s" %(compressed_filename,
output_dir)
# First unzip the file using conventional unzip
unzipped_files = unzipper(compressed_filename, output_dir)
# Remove the original .zip
if os.path.isfile(compressed_filename):
print "Removing the compressed file %s" %(compressed_filename)
if os.path.isfile(compressed_filename):
os.remove(compressed_filename)
def collect_miso_dirs(self, path, dirnames):
"""
Collect raw MISO output directories
"""
if fnmatch.filter(dirnames, "*.miso"):
self.miso_dirs_to_compress.append(path)
return dirnames
return []
def compress_miso(output_filename, input_dirs,
comp_ext=".misozip"):
"""
Compress a directory containing MISO files.
Traverse directories, one by one, and look for directories
that contain raw MISO output (*.miso files).
"""
output_filename = utils.pathify(output_filename)
for input_dir in input_dirs:
if not os.path.isdir(input_dir):
print "Error: Cannot find directory %s" %(input_dir)
sys.exit(1)
if not os.path.basename(output_filename).endswith(comp_ext):
print "Error: Compressed output filename must end in %s" \
%(comp_ext)
sys.exit(1)
if os.path.isfile(output_filename):
print "Error: Output filename exists. Please delete %s to overwrite." \
%(output_filename)
sys.exit(1)
t1 = time.time()
miso_comp = MISOCompressor()
miso_comp.compress(output_filename, input_dirs)
t2 = time.time()
print "Compression took %.2f minutes." %((t2 - t1)/60.)
def uncompress_miso(compressed_filename, output_dir):
"""
Uncompress MISO directory.
"""
if not os.path.isfile(compressed_filename):
print "Error: Zip file %s is not found." \
%(compressed_filename)
sys.exit(1)
t1 = time.time()
miso_comp = MISOCompressor()
miso_comp.uncompress(compressed_filename, output_dir)
t2 = time.time()
print "Uncompression took %.2f minutes." %((t2 - t1)/60.)
def zipper(dir, zip_file):
"""
Zip a directory 'dir' recursively, saving result in
'zip_file'.
by Corey Goldberg.
"""
# Enable Zip64 to allow creation of large Zip files
zip = zipfile.ZipFile(zip_file, 'w',
compression=zipfile.ZIP_DEFLATED,
allowZip64=True)
root_len = len(os.path.abspath(dir))
for root, dirs, files in os.walk(dir):
archive_root = os.path.abspath(root)[root_len:]
for f in files:
fullpath = os.path.join(root, f)
archive_name = os.path.join(archive_root, f)
zip.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
zip.close()
return zip_file
def unzipper(zip_file, outdir):
"""
Unzip a given 'zip_file' into the output directory 'outdir'.
Return the names of files in the archive.
"""
zf = zipfile.ZipFile(zip_file, "r",
allowZip64=True)
filenames = zf.namelist()
zf.extractall(outdir)
return filenames
def greeting():
print "Compress/uncompress MISO output. Usage:\n"
print "To compress a directory containing MISO files \'inputdir\', use: "
print " miso_zip --compress outputfile.misozip inputdir"
print "To uncompress back into a directory \'outputdir\', use: "
print " miso_zip --uncompress outputfile.misozip outputdir"
print "\nNote: compressed filename must end in \'.misozip\'"
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--compress", dest="compress", nargs=2, default=None,
help="Compress a directory containing MISO output. "
"Takes as arguments: (1) the output filename of the "
"compressed file, (2) a comma-separated list of "
"directory names to be compressed. "
"Example: --compress output.misozip dirname1,dirname2")
parser.add_option("--uncompress", dest="uncompress", nargs=2, default=None,
help="Uncompress a file generated by compress_miso. "
"Takes as arguments: (1) the filename to be "
"uncompressed, and (2) the directory to place the "
"uncompressed representation into. "
"Example: --uncompress output.misozip outputdir")
(options, args) = parser.parse_args()
if (options.compress is None) and (options.uncompress is None):
greeting()
sys.exit(1)
elif (options.compress is not None) and (options.uncompress is not None):
# Can't be given both.
greeting()
print "Error: Cannot process --compress and --uncompress at same time."
sys.exit(1)
if options.compress is not None:
output_filename = utils.pathify(options.compress[0])
input_dirs = [utils.pathify(d) \
for d in options.compress[1].split(",")]
compress_miso(output_filename, input_dirs)
if options.uncompress is not None:
compressed_filename = utils.pathify(options.uncompress[0])
output_dir = utils.pathify(options.uncompress[1])
uncompress_miso(compressed_filename, output_dir)
if __name__ == "__main__":
main()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/miso_zip.py
|
miso_zip.py
|
from collections import defaultdict
import misopy
from misopy.Gene import load_genes_from_gff
import os
import time
import pysam
import binascii
import ctypes
from numpy import array
from scipy import *
# def read_to_isoforms(alignment, gene):
# """
# Align SAM read to gene's isoforms.
# """
# ##
# ## SAM format is zero-based, but GFF are 1-based, so add 1
# ## to read position
# ##
# pass
def cigar_to_end_coord(start, cigar):
"""
Compute the end coordinate based on the CIGAR string.
Assumes the coordinate is 1-based.
"""
#print start, cigar
# Parse cigar part
#for cigar_part in cigar:
# cigar_type, seq_len = cigar_part
# offset += seq_len
offset = sum([cigar_part[1] for cigar_part in cigar])
end = start + offset - 1
return end
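# Illustrative example (not part of the original module): a 36 bp read
# starting at 1-based position 100 with CIGAR 20M100N16M spans an intron,
# so offset = 20 + 100 + 16 = 136 and the end coordinate is
# 100 + 136 - 1 = 235. In pysam's list-of-tuples form:
#
#   cigar_to_end_coord(100, [(0, 20), (3, 100), (0, 16)])  # -> 235
#
# Note this sums the lengths of *all* CIGAR operations, which is correct
# for match (M) and skip (N) operations but would overcount insertions
# or soft clips.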
def single_end_read_to_isoforms(read, gene, read_len, overhang_len=1):
"""
Align single-end SAM read to gene's isoforms.
"""
start = read.pos + 1
end = cigar_to_end_coord(start, read.cigar)
assert(start < end)
alignment, isoform_coords = gene.align_read_to_isoforms_with_cigar(read.cigar, start, end, read_len,
overhang_len)
return alignment
def paired_read_to_isoforms(paired_read, gene, read_len,
overhang_len=1):
"""
Align paired-end SAM read to gene's isoforms.
"""
left_read = paired_read[0]
right_read = paired_read[1]
# Convert to 1-based coordinates
left_start = left_read.pos + 1
right_start = right_read.pos + 1
# Get end coordinates of each read
left_end = cigar_to_end_coord(left_start, left_read.cigar)
assert(left_start < left_end)
right_end = cigar_to_end_coord(right_start, right_read.cigar)
assert(right_start < right_end)
alignment = None
frag_lens = None
    # Throw out read pairs that imply a zero or negative fragment
    # length
# print "LEFT read start,end: ", left_start, left_end
# print "RIGHT read start,end: ", right_start, right_end
if left_start > right_start:
return None, None
else:
alignment, frag_lens = gene.align_read_pair_with_cigar(
left_read.cigar, left_start, left_end,
right_read.cigar, right_start, right_end,
read_len=read_len, overhang=overhang_len)
# assert(left_start < right_start), "Reads scrambled?"
# assert(left_end < right_start), "Reads scrambled: left=(%d, %d), right=(%d, %d)" \
# %(left_start, left_end, right_start, right_end)
# print "Read: ", (left_start, left_end), " - ", (right_start, right_end)
# print " - Alignment: ", alignment, " frag_lens: ", frag_lens
# print " - Sequences: "
# print " - %s\t%s" %(left_read.seq, right_read.seq)
return alignment, frag_lens
# def paired_read_to_isoforms(paired_read, gene, read_len=36,
# overhang_len=1):
# """
# Align paired-end SAM read to gene's isoforms.
# """
# left_read = paired_read[0]
# right_read = paired_read[1]
# # Convert to 1-based coordinates
# left_start = left_read.pos + 1
# right_start = right_read.pos + 1
# # Get end coordinates of each read
# left_end = cigar_to_end_coord(left_start, left_read.cigar)
# assert(left_start < left_end)
# right_end = cigar_to_end_coord(right_start, right_read.cigar)
# assert(right_start < right_end)
# alignment = None
# frag_lens = None
# # Throw out reads that posit a zero or less than zero fragment
# # length
# if left_start > right_start or left_end > right_start:
# return None, None
# else:
# alignment, frag_lens = gene.align_read_pair(left_start, left_end,
# right_start, right_end,
# read_len=read_len,
# overhang=overhang_len)
# # assert(left_start < right_start), "Reads scrambled?"
# # assert(left_end < right_start), "Reads scrambled: left=(%d, %d), right=(%d, %d)" \
# # %(left_start, left_end, right_start, right_end)
# # print "Read: ", (left_start, left_end), " - ", (right_start, right_end)
# # print " - Alignment: ", alignment, " frag_lens: ", frag_lens
# # print " - Sequences: "
# # print " - %s\t%s" %(left_read.seq, right_read.seq)
# return alignment, frag_lens
def load_bam_reads(bam_filename,
template=None):
"""
Load a set of indexed BAM reads.
"""
print "Loading BAM filename from: %s" %(bam_filename)
bam_filename = os.path.abspath(os.path.expanduser(bam_filename))
bamfile = pysam.Samfile(bam_filename, "rb",
template=template)
return bamfile
def fetch_bam_reads_in_gene(bamfile, chrom, start, end, gene=None):
"""
Align BAM reads to the gene model.
"""
gene_reads = []
if chrom in bamfile.references:
pass
    else:
        # Try the chromosome name without the "chr" prefix
        chrom_parts = chrom.split("chr")
        if len(chrom_parts) <= 1:
            chrom = chrom_parts[0]
        else:
            chrom = chrom_parts[1]
try:
gene_reads = bamfile.fetch(chrom, start, end)
except ValueError:
print "Cannot fetch reads in region: %s:%d-%d" %(chrom,
start,
end)
except AssertionError:
print "AssertionError in region: %s:%d-%d" %(chrom,
start,
end)
print " - Check that your BAM file is indexed!"
return gene_reads
def flag_to_strand(flag):
"""
Takes integer flag as argument.
Returns strand ('+' or '-') from flag.
"""
    # Bit 0x10 of the SAM flag marks a read mapped to the reverse strand.
    # (The original bin()-string indexing crashed for small flag values.)
    if flag & 16:
        return "-"
    return "+"
def strip_mate_id(read_name):
"""
Strip canonical mate IDs for paired end reads, e.g.
#1, #2
or:
/1, /2
"""
    if read_name.endswith("/1") or read_name.endswith("/2") or \
       read_name.endswith("#1") or read_name.endswith("#2"):
        # The mate suffix is two characters long (e.g. "/1")
        read_name = read_name[0:-2]
return read_name
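# Illustrative usage (assuming the two-character suffix convention above):
#
#   strip_mate_id("HWI-EAS123:3:1:9:1665/1")  # -> "HWI-EAS123:3:1:9:1665"
#   strip_mate_id("HWI-EAS123:3:1:9:1665#2")  # -> "HWI-EAS123:3:1:9:1665"
#   strip_mate_id("HWI-EAS123:3:1:9:1665")    # -> unchanged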
def pair_sam_reads(samfile, filter_reads=True,
return_unpaired=False):
"""
Pair reads from a SAM file together.
"""
paired_reads = defaultdict(list)
unpaired_reads = {}
for read in samfile:
curr_name = read.qname
# Strip canonical mate IDs
curr_name = strip_mate_id(curr_name)
if filter_reads:
# Skip reads that failed QC or are unmapped
if read.is_qcfail or read.is_unmapped or \
read.mate_is_unmapped or (not read.is_paired):
unpaired_reads[curr_name] = read
continue
paired_reads[curr_name].append(read)
to_delete = []
num_unpaired = 0
num_total = 0
for read_name, read in paired_reads.iteritems():
if len(read) != 2:
unpaired_reads[read_name] = read
num_unpaired += 1
# Delete unpaired reads
to_delete.append(read_name)
continue
left_read, right_read = read[0], read[1]
# Check that read mates are on opposite strands
left_strand = flag_to_strand(left_read.flag)
right_strand = flag_to_strand(right_read.flag)
if left_strand == right_strand:
# Skip read pairs that are on the same strand
to_delete.append(read_name)
continue
if left_read.pos > right_read.pos:
print "WARNING: %s left mate starts later than right "\
"mate" %(left_read.qname)
num_total += 1
    # Delete all reads flagged for removal (unpaired reads and
    # same-strand pairs)
    for del_key in to_delete:
        del paired_reads[del_key]
    print "Filtered out %d read pairs that were on same strand." \
          %(len(to_delete) - num_unpaired)
print "Filtered out %d reads that had no paired mate." \
%(num_unpaired)
print " - Total read pairs: %d" %(num_total)
if not return_unpaired:
return paired_reads
else:
return paired_reads, unpaired_reads
# Global variable containing CIGAR types for conversion
CIGAR_TYPES = ('M', 'I', 'D', 'N', 'S', 'H', 'P')
def sam_cigar_to_str(sam_cigar):
"""
Convert pysam CIGAR list to string format.
"""
# First element in sam CIGAR list is the CIGAR type
# (e.g. match or insertion) and the second is
# the number of nucleotides
#cigar_str = "".join(["%d%s" %(c[1], CIGAR_TYPES[c[0]]) \
# for c in sam_cigar])
#### OPTIMIZED VERSION
cigar_str = ""
if sam_cigar is None:
return cigar_str
for c in sam_cigar:
cigar_str += "%d%s" %(c[1], CIGAR_TYPES[c[0]])
return cigar_str
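# Illustrative example: pysam represents 20M100N16M as
# [(0, 20), (3, 100), (0, 16)], so:
#
#   sam_cigar_to_str([(0, 20), (3, 100), (0, 16)])  # -> "20M100N16M"
#   sam_cigar_to_str(None)                          # -> ""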
def read_matches_strand(read,
target_strand,
strand_rule,
paired_end=None):
"""
Check if a read matches strand.
- target_strand: the annotation strand ('+' or '-')
- strand_rule: the strand rule, i.e.
('fr-unstranded', 'fr-firststrand', or 'fr-secondstrand')
"""
if strand_rule == "fr-unstranded":
return True
matches = False
if paired_end is not None:
# Paired-end reads
read1, read2 = read
if strand_rule == "fr-firststrand":
# fr-firststrand: means that the *second* of the mates
# must match the strand
matches = (flag_to_strand(read2.flag) == target_strand)
elif strand_rule == "fr-secondstrand":
# fr-secondstrand: means that the *first* of the mates
# must match the strand
matches = (flag_to_strand(read1.flag) == target_strand)
else:
raise Exception, "Unknown strandedness rule."
else:
# Single-end reads
if strand_rule == "fr-firststrand":
# fr-firststrand: We sequence the first read only, so it must
# *NOT* match the target strand
matches = (flag_to_strand(read.flag) != target_strand)
elif strand_rule == "fr-secondstrand":
# fr-secondstrand: We only sequence the first read, which
# is supposed to match the target strand
matches = (flag_to_strand(read.flag) == target_strand)
else:
raise Exception, "Unknown strandedness rule."
return matches
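# Summary of the rules implemented above (illustrative recap, not an
# exhaustive treatment of library strandedness):
#
#   strand_rule        single-end read          paired-end (read1, read2)
#   fr-unstranded      always matches           always matches
#   fr-firststrand     read strand != target    read2 strand == target
#   fr-secondstrand    read strand == target    read1 strand == target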
def sam_parse_reads(samfile,
paired_end=False,
strand_rule=None,
target_strand=None):
"""
Parse the SAM reads. If paired-end, pair up the mates
together.
Also forces the strandedness convention, discarding
reads that do not match the correct strand.
- strand_rule: specifies the strandedness convention. Can be
'fr-unstranded', 'fr-firststrand' or 'fr-secondstrand'.
- target_strand: specifies the strand to match, i.e. the
annotation strand. Can be '+' or '-'.
"""
read_positions = []
read_cigars = []
num_reads = 0
check_strand = True
# Determine if we need to check strandedness of reads.
# If we're given an unstranded convention, or if we're
# not given a target strand, then assume that there's
# no need to check strandedness.
    if (strand_rule is None) or \
       (strand_rule == "fr-unstranded") or \
       (target_strand is None):
# No need to check strand
check_strand = False
# Track number of reads discarded due to strand
# violations, if strand-specific
num_strand_discarded = 0
if paired_end:
# Pair up the reads
paired_reads = pair_sam_reads(samfile)
# Process reads into format required by fastmiso
# MISO C engine requires pairs to follow each other in order.
# Unpaired reads are not supported.
for read_id, read_info in paired_reads.iteritems():
if check_strand:
# Check strand
if not read_matches_strand(read_info,
target_strand,
strand_rule,
paired_end=paired_end):
# Skip reads that don't match strand
num_strand_discarded += 1
continue
read1, read2 = read_info
if (read1.cigar is None) or (read2.cigar is None):
continue
# Read positions and cigar strings are collected
read_positions.append(int(read1.pos))
read_positions.append(int(read2.pos))
read_cigars.append(sam_cigar_to_str(read1.cigar))
read_cigars.append(sam_cigar_to_str(read2.cigar))
num_reads += 1
else:
# Single-end
for read in samfile:
if read.cigar is None:
continue
if check_strand:
if not read_matches_strand(read,
target_strand,
strand_rule,
paired_end=paired_end):
# Skip reads that don't match strand
num_strand_discarded += 1
continue
read_positions.append(int(read.pos))
read_cigars.append(sam_cigar_to_str(read.cigar))
num_reads += 1
if check_strand:
print "No. reads discarded due to strand violation: %d" \
%(num_strand_discarded)
reads = (tuple(read_positions),
tuple(read_cigars))
return reads, num_reads
def sam_pe_reads_to_isoforms(samfile, gene, read_len, overhang_len):
"""
Align read pairs (from paired-end data set) to gene.
Returns alignment of paired-end reads (with insert lengths)
to gene and number of read pairs aligned.
"""
paired_reads = pair_sam_reads(samfile)
num_read_pairs = 0
pe_reads = []
k = 0
for read_id, read_pair in paired_reads.iteritems():
if len(read_pair) != 2:
# Skip reads with no pair
continue
alignment, frag_lens = paired_read_to_isoforms(read_pair, gene,
read_len, overhang_len)
# Skip reads that are not consistent with any isoform
if any(array(alignment) == 1):
pe_reads.append([alignment, frag_lens])
num_read_pairs += 1
else:
# print "read %s inconsistent with all isoforms" %(read_id)
k += 1
print "Filtered out %d reads that were not consistent with any isoform" %(k)
return pe_reads, num_read_pairs
def sam_se_reads_to_isoforms(samfile, gene, read_len,
overhang_len):
"""
Align single-end reads to gene.
"""
num_reads = 0
alignments = []
num_skipped = 0
for read in samfile:
alignment = single_end_read_to_isoforms(read, gene, read_len,
overhang_len)
if 1 in alignment:
# If the read aligns to at least one of the isoforms, keep it
alignments.append(alignment)
num_reads += 1
else:
num_skipped += 1
print "Skipped total of %d reads." %(num_skipped)
return alignments, num_reads
def sam_reads_to_isoforms(samfile, gene, read_len, overhang_len,
paired_end=False):
"""
Align BAM reads to the gene model.
"""
print "Aligning reads to gene..."
t1 = time.time()
    if paired_end:
# Paired-end reads
reads, num_reads = sam_pe_reads_to_isoforms(samfile, gene, read_len,
overhang_len)
else:
# Single-end reads
reads, num_reads = sam_se_reads_to_isoforms(samfile, gene, read_len,
overhang_len)
t2 = time.time()
print "Alignment to gene took %.2f seconds (%d reads)." %((t2 - t1),
num_reads)
return reads
def main():
pass
if __name__ == "__main__":
main()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/sam_utils.py
|
sam_utils.py
|
from scipy import *
from numpy.random import multinomial, binomial, negative_binomial, normal, randint
import misopy
from misopy.parse_csv import find
def print_reads_summary(reads, gene, paired_end=False):
num_isoforms = len(gene.isoforms)
computed_const = False
num_constitutive_reads = 0
for n in range(num_isoforms):
unambig_read = zeros(num_isoforms, dtype=int)
unambig_read[n] = 1
num_iso_reads = 0
for r in reads:
if paired_end:
curr_read = r[0]
else:
curr_read = r
if all(curr_read == unambig_read):
num_iso_reads += 1
if not computed_const:
                # If we haven't done so already, count how many reads
                # are constitutive, i.e. consistent with all isoforms
if all(array(curr_read) == 1):
num_constitutive_reads += 1
computed_const = True
print "Iso %d (len = %d): %d unambiguous supporting reads" %(n, gene.isoforms[n].len,
num_iso_reads)
print "No. constitutive reads (consistent with all): %d" %(num_constitutive_reads)
def get_reads_summary(reads):
    if reads.ndim != 2:
        raise Exception, "get_reads_summary is only defined for two-isoform genes."
ni = 0
ne = 0
nb = 0
for read in reads:
if read[0] == 1 and read[1] == 0:
# NI read
ni += 1
elif read[0] == 0 and read[1] == 1:
# NE read
ne += 1
elif read[0] == 1 and read[1] == 1:
nb += 1
return (ni, ne, nb)
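# Illustrative example for a two-isoform gene:
#
#   reads = array([[1, 0], [1, 0], [0, 1], [1, 1]])
#   get_reads_summary(reads)  # -> (2, 1, 1), i.e. 2 NI, 1 NE, 1 NB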
def expected_read_summary(gene, true_psi, num_reads, read_len, overhang_len):
    """
    Compute the expected number of NI, NE, NB reads and the number of reads
    excluded due to overhang constraints. Note that:
      NI + NE + NB = number of reads not excluded by overhang
                   = total reads - reads excluded by overhang
    """
# Compute probability of overhang violation:
# p(oh violation) = p(oh violation | isoform1)p(isoform1) + p(oh violation | isoform2)p(isoform2)
##
## Assumes first isoform has 3 exons, second has 2 exons
##
parts = gene.parts
iso1_seq = gene.isoforms[0].seq
iso2_seq = gene.isoforms[1].seq
num_pos_iso1 = len(iso1_seq) - read_len + 1
num_pos_iso2 = len(iso2_seq) - read_len + 1
psi_f = (true_psi*num_pos_iso1)/((true_psi*num_pos_iso1) + ((1-true_psi)*num_pos_iso2))
##
## Try a version that takes into account overhang!
##
p_oh_violation = (((2*(overhang_len-1)*2)/float(num_pos_iso1))*psi_f) + \
((2*(overhang_len-1)/float(num_pos_iso2))*(1-psi_f))
# Compute probability of inclusion read:
# p(NI) = p(isoform 1)p(inclusion read | isoform 1)
skipped_exon_len = gene.get_part_by_label('B').len
p_NI = psi_f*(((skipped_exon_len - read_len + 1) + 2*(read_len + 1 - 2*overhang_len)) / \
float(len(iso1_seq) - read_len + 1))
# Compute probability of exclusion read:
# p(NE) = p(isoform 2)p(exclusion read | isoform 2)
p_NE = (1-psi_f)*((read_len + 1 - (2*overhang_len))/float(len(iso2_seq) - read_len + 1))
# Compute probability of read supporting both:
# p(NB) = p(isoform 1)p(NB read | isoform 1) + p(isoform 2)p(NB read | isoform 2)
num_NB = (gene.get_part_by_label('A').len - read_len + 1) + (gene.get_part_by_label('C').len - read_len + 1)
p_NB = psi_f*((num_NB)/float(len(iso1_seq) - read_len + 1)) + \
(1-psi_f)*(num_NB/float(len(iso2_seq) - read_len + 1))
print "p_NI: %.5f, p_NE: %.5f, p_NB: %.5f" %(p_NI, p_NE, p_NB)
return [p_NI*num_reads, p_NE*num_reads, p_NB*num_reads, p_oh_violation*num_reads]
def simulate_two_iso_reads(gene, true_psi, num_reads, read_len, overhang_len,
p_ne_loss=0, p_ne_gain=0, p_ni_loss=0, p_ni_gain=0):
"""
Return a list with an element for each isoform, saying whether the read could have
been generated by that isoform (denoted 1) or not (denoted 0).
"""
if len(gene.isoforms) != 2:
raise Exception, "simulate_two_iso_reads requires a gene with only two isoforms."
    if len(true_psi) < 2:
        raise Exception, "Simulate reads requires a probability vector of size >= 2."
reads_summary = [0, 0, 0]
all_reads = []
categories = []
true_isoforms = []
noise_probs = array([p_ne_loss, p_ne_gain, p_ni_loss, p_ni_gain])
noisify = any(noise_probs > 0)
noiseless_counts = [0, 0, 0]
for k in range(0, num_reads):
reads_sampled = sample_random_read(gene, true_psi, read_len, overhang_len)
read_start, read_end = reads_sampled[1]
category = reads_sampled[2]
chosen_iso = reads_sampled[3]
        # Convert to a list so the counts can be mutated below
        reads_sampled = list(reads_sampled[0])
single_read = (reads_sampled[0] + reads_sampled[2], reads_sampled[1] + reads_sampled[2])
NI, NE, NB = reads_sampled
# If read was not thrown out due to overhang, include it
if any(array(single_read) != 0):
noiseless_counts[0] += NI
noiseless_counts[1] += NE
noiseless_counts[2] += NB
# Check if read was chosen to be noised
if noisify:
# If exclusive isoform was sampled and we decided to noise it, discard the read
if (p_ne_loss > 0) and (chosen_iso == 1) and (rand() < p_ne_loss):
# Note that in this special case of a gene with two isoforms,
# 'reads_sampled' is a read summary tuple of the form (NI, NE, NB)
# and not an alignment to the two isoforms.
if reads_sampled[1] == 1:
# If read came from exclusion junction (NE), discard it
continue
if (p_ne_gain > 0) and (chosen_iso == 1) and (rand() < p_ne_gain):
if reads_sampled[1] == 1:
# Append read twice
all_reads.extend([single_read, single_read])
# Find what category read landed in and increment it
cat = reads_sampled.index(1)
reads_sampled[cat] += 1
all_reads.append(single_read)
categories.append(category)
prev_reads_summary = reads_summary
reads_summary[0] += reads_sampled[0]
reads_summary[1] += reads_sampled[1]
reads_summary[2] += reads_sampled[2]
true_isoforms.append(chosen_iso)
# if p_ne_gain > 0:
# print "--> No noise NI: %d, NE: %d, NB: %d" %(noiseless_counts[0], noiseless_counts[1],
# noiseless_counts[2])
# print " noised: ", reads_summary
all_reads = array(all_reads)
return (reads_summary, all_reads, categories, true_isoforms)
def simulate_reads(gene, true_psi, num_reads, read_len, overhang_len):
"""
    Return a list of reads. Each read is a vector of the size of the number of isoforms, with 1
    if the read could have come from the isoform and 0 otherwise.
"""
if type(true_psi) != list:
raise Exception, "simulate_reads: expects true_psi to be a probability vector summing to 1."
if len(gene.isoforms) == 2:
raise Exception, "simulate_reads: should use simulate_two_iso_reads for genes with only two isoforms."
if sum(true_psi) != 1:
raise Exception, "simulate_reads: true_psi must sum to 1."
all_reads = []
read_coords = []
    if len(true_psi) < 2:
        raise Exception, "Simulate reads requires a probability vector of size >= 2."
for k in range(0, num_reads):
reads_sampled = sample_random_read(gene, true_psi, read_len, overhang_len)
alignment = reads_sampled[0]
read_start, read_end = reads_sampled[1]
category = reads_sampled[2]
reads_sampled = reads_sampled[0]
if any(alignment != 0):
# If read was not thrown out due to overhang, include it
all_reads.append(alignment)
read_coords.append((read_start, read_end))
all_reads = array(all_reads)
return (all_reads, read_coords)
def check_paired_end_read_consistency(reads):
"""
Check that a set of reads are consistent with their fragment lengths,
i.e. that reads that do not align to an isoform have a -Inf fragment length,
and reads that are alignable to an isoform do not have a -Inf fragment length.
"""
pe_reads = reads[:, 0]
frag_lens = reads[:, 1]
num_reads = len(pe_reads)
print "Checking read consistency for %d reads..." %(num_reads)
print reads
    is_consistent = all(frag_lens[nonzero(pe_reads == 1)] != -Inf)
if not is_consistent:
return is_consistent
is_consistent = all(frag_lens[nonzero(pe_reads == 0)] == -Inf)
return is_consistent
##
## Different fragment length distributions.
##
def sample_binomial_frag_len(frag_mean=200, frag_variance=100):
"""
Sample a fragment length from a binomial distribution parameterized with a
mean and variance.
If frag_variance > frag_mean, use a Negative-Binomial distribution.
"""
assert(abs(frag_mean - frag_variance) > 1)
if frag_variance < frag_mean:
p = 1 - (frag_variance/float(frag_mean))
# N = mu/(1-(sigma^2/mu))
n = float(frag_mean) / (1 - (float(frag_variance)/float(frag_mean)))
return binomial(n, p)
else:
r = -1 * (power(frag_mean, 2)/float(frag_mean - frag_variance))
p = frag_mean / float(frag_variance)
print "Sampling frag_mean=",frag_mean, " frag_variance=", frag_variance
print "r: ",r, " p: ", p
return negative_binomial(r, p)
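# Moment-matching behind the parameters above (a sketch): for a
# Binomial(n, p), mean = n*p and variance = n*p*(1-p), so
# variance/mean = 1 - p, giving p = 1 - variance/mean and n = mean/p.
# For numpy's negative_binomial(r, p) (number of failures before r
# successes), mean = r*(1-p)/p and variance = r*(1-p)/p**2, so
# p = mean/variance and r = mean**2/(variance - mean), which matches the
# r computed above since -mean**2/(mean - variance) = mean**2/(variance - mean).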
def compute_rpkc(list_read_counts, const_region_lens, read_len):
"""
Compute the RPKC (reads per kilobase of constitutive region) for the set of constitutive regions.
These are assumed to be constitutive exon body regions (not including constitutive junctions.)
"""
num_mappable_pos = 0
# assert(len(list_read_counts) == len(const_region_lens))
for region_len in const_region_lens:
num_mappable_pos += region_len - read_len + 1
read_counts = sum(list_read_counts)
rpkc = read_counts / (num_mappable_pos / 1000.)
return rpkc
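# Worked example (hypothetical numbers): with constitutive regions of
# lengths 500 and 1500 and a 50 bp read length, there are
# (500 - 50 + 1) + (1500 - 50 + 1) = 1902 mappable positions.
# With read counts [300, 651] summing to 951:
#
#   compute_rpkc([300, 651], [500, 1500], 50)  # -> 951 / 1.902 = 500.0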
def sample_normal_frag_len(frag_mean, frag_variance):
"""
Sample a fragment length from a rounded 'discretized' normal distribution.
"""
frag_len = round(normal(frag_mean, sqrt(frag_variance)))
return frag_len
def simulate_paired_end_reads(gene, true_psi, num_reads, read_len, overhang_len, mean_frag_len,
frag_variance, bino_sampling=False):
"""
Return a list of reads that are aligned to isoforms.
This list is a pair, where the first element is a list of read alignments
and the second is a set of corresponding fragment lengths for each alignment.
"""
    if sum(true_psi) != 1:
        raise Exception, "simulate_paired_end_reads: true_psi must sum to 1."
# sample reads
reads = []
read_coords = []
assert(frag_variance != None)
sampled_frag_lens = []
for k in range(0, num_reads):
# choose a fragment length
insert_len = -1
while insert_len < 0:
if bino_sampling:
frag_len = sample_binomial_frag_len(frag_mean=mean_frag_len, frag_variance=frag_variance)
else:
frag_len = sample_normal_frag_len(frag_mean=mean_frag_len, frag_variance=frag_variance)
insert_len = frag_len - (2 * read_len)
if insert_len < 0:
raise Exception, "Sampled fragment length that is shorter than 2 * read_len!"
#print "Sampled fragment length that is shorter than 2 * read_len!"
sampled_frag_lens.append(frag_len)
reads_sampled = sample_random_read_pair(gene, true_psi, read_len, overhang_len, insert_len, mean_frag_len)
alignment = reads_sampled[0]
frag_lens = reads_sampled[1]
read_coords.append(reads_sampled[2])
reads.append([alignment, frag_lens])
return (array(reads), read_coords, sampled_frag_lens)
# def compute_read_pair_position_prob(iso_len, read_len, insert_len):
# """
# Compute the probability that the paired end read of the given read length and
# insert size will start at each position of the isoform (uniform.)
# """
# read_start_prob = zeros(iso_len)
# # place a 1 in each position if a read could start there (0-based index)
# for start_position in range(iso_len):
# # total read length, including insert length and both mates
# paired_read_len = 2*read_len + insert_len
# if start_position + paired_read_len <= iso_len:
# read_start_prob[start_position] = 1
# # renormalize ones to get a probability vector
# possible_positions = nonzero(read_start_prob)[0]
# if len(possible_positions) == 0:
# return read_start_prob
# num_possible_positions = len(possible_positions)
# read_start_prob[possible_positions] = 1/float(num_possible_positions)
# return read_start_prob
def compute_read_pair_position_prob(iso_len, read_len, frag_len):
"""
Compute the probability that the paired end read of the given fragment length
will start at each position of the isoform (uniform.)
"""
read_start_prob = zeros(iso_len)
# place a 1 in each position if a read could start there (0-based index)
for start_position in range(iso_len):
# total read length, including insert length and both mates
if start_position + frag_len - 1 <= iso_len - 1:
read_start_prob[start_position] = 1
# renormalize ones to get a probability vector
possible_positions = nonzero(read_start_prob)[0]
if len(possible_positions) == 0:
return read_start_prob
num_possible_positions = len(possible_positions)
read_start_prob[possible_positions] = 1/float(num_possible_positions)
return read_start_prob
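# Illustrative example: for an isoform of length 10 and a fragment of
# length 8, only start positions 0, 1 and 2 fit (start + 8 - 1 <= 9),
# so each gets probability 1/3 and the rest get 0. Note that read_len
# is unused here; only the total fragment length matters:
#
#   compute_read_pair_position_prob(10, 3, 8)
#   # -> array([1/3., 1/3., 1/3., 0, 0, 0, 0, 0, 0, 0])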
def sample_random_read_pair(gene, true_psi, read_len, overhang_len, insert_len, mean_frag_len):
"""
Sample a random paired-end read (not taking into account overhang) from the
given a gene, the true Psi value, read length, overhang length and the insert length (fixed).
    A paired-end read is defined as (genomic_left_read_start, genomic_left_read_end,
    genomic_right_read_start, genomic_right_read_end).
Note that if we're given a gene that has only two isoforms, the 'align' function of
gene will return a read summary in the form of (NI, NE, NB) rather than an alignment to
the two isoforms (which is a pair (0/1, 0/1)).
"""
iso_lens = [iso.len for iso in gene.isoforms]
num_positions = array([(l - mean_frag_len + 1) for l in iso_lens])
# probability of sampling a particular position from an isoform -- assume uniform for now
iso_probs = [1/float(n) for n in num_positions]
psi_frag_denom = sum(num_positions * array(true_psi))
psi_frags = [(num_pos * curr_psi)/psi_frag_denom for num_pos, curr_psi \
in zip(num_positions, true_psi)]
# Choose isoform to sample read from
chosen_iso = list(multinomial(1, psi_frags)).index(1)
iso_len = gene.isoforms[chosen_iso].len
frag_len = insert_len + 2*read_len
isoform_position_probs = compute_read_pair_position_prob(iso_len, read_len, frag_len)
# sanity check
left_read_start = list(multinomial(1, isoform_position_probs)).index(1)
left_read_end = left_read_start + read_len - 1
# right read starts after the left read and the insert length
right_read_start = left_read_start + read_len + insert_len
right_read_end = left_read_start + (2*read_len) + insert_len - 1
# convert read coordinates from coordinates of isoform that generated it to genomic coordinates
genomic_left_read_start, genomic_left_read_end = \
gene.isoforms[chosen_iso].isoform_coords_to_genomic(left_read_start,
left_read_end)
genomic_right_read_start, genomic_right_read_end = \
gene.isoforms[chosen_iso].isoform_coords_to_genomic(right_read_start,
right_read_end)
# parameterized paired end reads as the start coordinate of the left
pe_read = (genomic_left_read_start, genomic_left_read_end,
genomic_right_read_start, genomic_right_read_end)
alignment, frag_lens = gene.align_read_pair(pe_read[0], pe_read[1], pe_read[2], pe_read[3],
overhang=overhang_len)
return (alignment, frag_lens, pe_read)
def sample_random_read(gene, true_psi, read_len, overhang_len):
"""
Sample a random read (not taking into account overhang) from the
given set of exons and the true Psi value.
Note that if we're given a gene that has only two isoforms, the 'align' function of
gene will return a read summary in the form of (NI, NE, NB) rather than an alignment to
the two isoforms (which is a pair (0/1, 0/1)).
"""
iso_lens = [iso.len for iso in gene.isoforms]
num_positions = array([(l - read_len + 1) for l in iso_lens])
# probability of sampling a particular position from an isoform -- assume uniform for now
iso_probs = [1/float(n) for n in num_positions]
psi_frag_denom = sum(num_positions * array(true_psi))
psi_frags = [(num_pos * curr_psi)/psi_frag_denom for num_pos, curr_psi \
in zip(num_positions, true_psi)]
# Choose isoform to sample read from
chosen_iso = list(multinomial(1, psi_frags)).index(1)
isoform_position_prob = ones(num_positions[chosen_iso]) * iso_probs[chosen_iso]
sampled_read_start = list(multinomial(1, isoform_position_prob)).index(1)
sampled_read_end = sampled_read_start + read_len - 1
# seq = gene.isoforms[chosen_iso].seq[sampled_read_start:sampled_read_end]
# alignment, category = gene.align(seq, overhang=overhang_len)
##
## Trying out new alignment method
##
# convert coordinates to genomic
genomic_read_start, genomic_read_end = \
gene.isoforms[chosen_iso].isoform_coords_to_genomic(sampled_read_start,
sampled_read_end)
alignment, category = gene.align_read(genomic_read_start, genomic_read_end, overhang=overhang_len)
return (tuple(alignment), [sampled_read_start, sampled_read_end], category, chosen_iso)
def read_counts_to_read_list(ni, ne, nb):
"""
Convert a set of read counts for a two-isoform gene (NI, NE, NB) to a list of reads.
"""
reads = []
reads.extend(ni * [[1, 0]])
reads.extend(ne * [[0, 1]])
reads.extend(nb * [[1, 1]])
return array(reads)
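# Illustrative round trip with get_reads_summary above:
#
#   read_counts_to_read_list(2, 1, 1)
#   # -> array([[1, 0], [1, 0], [0, 1], [1, 1]])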
# def sample_random_read(gene, true_psi, read_len, overhang_len):
# """
# Sample a random read (not taking into account overhang) from the
# given set of exons and the given true Psi value.
# """
# iso1_len = gene.isoforms[0]['len']
# iso2_len = gene.isoforms[1]['len']
# num_inc = 0
# num_exc = 0
# num_both = 0
# num_positions_iso1 = iso1_len - read_len + 1
# num_positions_iso2 = iso2_len - read_len + 1
# p1 = 1/float(num_positions_iso1)
# p2 = 1/float(num_positions_iso2)
# psi_frag = (num_positions_iso1*true_psi)/((num_positions_iso1*true_psi + num_positions_iso2*(1-true_psi)))
# # Choose isoform to sample read from
# if rand() < psi_frag:
# isoform_position_prob = ones(num_positions_iso1)*p1
# sampled_read_start = list(multinomial(1, isoform_position_prob)).index(1)
# sampled_read_end = sampled_read_start + read_len
# seq = gene.isoforms[0]['seq'][sampled_read_start:sampled_read_end]
# [n1, n2, nb], category = gene.align_two_isoforms(seq, overhang=overhang_len)
# return [[n1, n2, nb], [sampled_read_start, sampled_read_end], category]
# else:
# isoform_position_prob = ones(num_positions_iso2)*p2
# sampled_read_start = list(multinomial(1, isoform_position_prob)).index(1)
# sampled_read_end = sampled_read_start + read_len
# seq = gene.isoforms[1]['seq'][sampled_read_start:sampled_read_end]
# [n1, n2, nb], category = gene.align_two_isoforms(seq, overhang=overhang_len)
# return [[n1, n2, nb], [sampled_read_start, sampled_read_end], category]
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/read_simulator.py
|
read_simulator.py
|
import time
import os
import misopy
import misopy.misc_utils as utils
def check_module_availability(required_modules):
unavailable_mods = 0
print "Checking availability of Python modules for MISO"
print "Looking for required Python modules.."
for module_name in required_modules:
print "Checking for availability of: %s" %(module_name)
try:
__import__(module_name)
# Manually check for correct matplotlib version
# required for sashimi_plot
if module_name == "matplotlib":
import matplotlib.pyplot as plt
if not hasattr(plt, "subplot2grid"):
print "WARNING: subplot2grid function is not available in matplotlib. " \
"to use sashimi_plot, you must upgrade your matplotlib " \
"to version 1.1.0 or later. This function is *not* required " \
"for MISO use."
except ImportError:
print " - Module %s not available!" %(module_name)
if module_name == "matplotlib":
print "matplotlib is required for sashimi_plot"
unavailable_mods += 1
if unavailable_mods != 0:
print "Total of %d modules were not available. " \
"Please install these and try again." %(unavailable_mods)
else:
print "All modules are available!"
print "Looking for required executables.."
required_programs = ["samtools", "bedtools"]
for prog in required_programs:
p = utils.which(prog)
print "Checking if %s is available" %(prog)
if p is None:
print " - Cannot find %s!" %(prog)
if prog == "bedtools":
print "bedtools is only required for prefiltering " \
"and computation of insert lengths."
if utils.which("tagBam"):
print "Your bedtools installation might be available " \
"but outdated. Please upgrade bedtools and " \
"ensure that \'bedtools\' is available on path."
else:
print " - %s is available" %(prog)
return unavailable_mods
def main():
required_modules = ['numpy', 'scipy', 'json', 'matplotlib',
'pysam']
check_module_availability(required_modules)
if __name__ == '__main__':
main()
|
AltAnalyze
|
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/module_availability.py
|
module_availability.py
|
import random as pyrand
from scipy import *
from numpy import *
import re
import misopy
from misopy.gff_utils import *
from misopy.parse_csv import *
import pprint
class Interval:
def __init__(self, start, end):
self.start = start
self.end = end
assert(self.start <= self.end)
self.len = self.end - self.start + 1
assert(self.len >= 1)
def __repr__(self):
return "Interval([%d, %d])" %(self.start, self.end)
def __eq__(self, interval):
if interval == None: return False
return self.start == interval.start and self.end == interval.end
def __ne__(self, interval):
        return not self.__eq__(interval)
def __lt__(self, interval):
if self.start == interval.start:
return self.end < interval.end
return self.start < interval.start
def contains(self, start, end):
if self.start <= start and self.end >= end:
return True
return False
def intersects(self, other):
if (self.start < other.end
and self.end > other.start):
return True
return False
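# Note the strict inequalities above: intervals that merely share an
# endpoint are *not* considered intersecting. For example (illustrative):
#
#   Interval(1, 10).intersects(Interval(10, 20))  # -> False
#   Interval(1, 10).intersects(Interval(5, 20))   # -> True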
class Exon(Interval):
def __init__(self, start, end, label=None, gene=None, seq="",
from_gff_record=None):
Interval.__init__(self, start, end)
self.gene = gene
self.label = label
self.seq = seq
if self.seq != "":
assert(len(self.seq) == len(self.len))
if from_gff_record != None:
# Load information from a GFF record
self.load_from_gff_record(from_gff_record)
def load_from_gff_record(self, gff_record):
"""
Load exon information from given GFF exon record.
"""
self.rec = gff_record['record']
self.parent_rec = gff_record['parent']
self.start = self.rec.start
self.end = self.rec.end
# Use first ID in list
self.label = self.rec.attributes['ID'][0]
def __repr__(self):
gene_label = None
if self.gene:
gene_label = self.gene.label
return "Exon([%d, %d], id = %s, seq = %s)(ParentGene = %s)" %(self.start, self.end,
self.label, self.seq,
gene_label)
def __eq__(self, other):
if other == None: return False
# TEST -- removing sequence equality
if self.start == other.start and self.end == other.end \
and self.gene == other.gene:
return True
#if self.seq == other.seq and self.start == other.start and self.end == other.end \
# and self.gene == other.gene:
# return True
return False
class Intron(Interval):
def __init__(self, start, end, label=None, gene=None, seq=""):
Interval.__init__(self, start, end)
self.gene = gene
self.label = label
self.seq = seq
if self.seq != "":
assert(len(seq) == len(self.len))
def __repr__(self):
gene_label = None
if self.gene:
gene_label = self.gene.label
return "Intron([%d, %d], id = %s)(ParentGene = %s)" %(self.start, self.end,
self.label, self.seq,
self.gene_label)
def __eq__(self, other):
if other == None: return False
if self.seq == other.seq and self.start == other.start and self.end == other.end \
and self.gene == other.gene:
return True
return False
class Gene:
"""
A representation of a gene and its isoforms. If a gene has two isoforms, make the inclusive
isoform the first.
Isoforms are made up of parts, which might be exons or introns.
    isoform_desc is a list of lists describing the structure of the isoforms, e.g.
[['A', 'B', 'A'], ['A', 'A']]
which creates two isoforms, composed of the 'A' and 'B' parts.
"""
def __init__(self, isoform_desc, parts,
chrom=None,
exons_seq=None,
label="",
strand="NA",
transcript_ids=None):
self.isoform_desc = isoform_desc
self.iso_lens = []
if label == "":
self.label = self.get_rand_id(5)
else:
self.label = label
self.parts = self.create_parts(parts)
self.isoforms = []
self.num_parts = len(self.parts)
self.chrom = chrom
self.strand = strand
self.transcript_ids = transcript_ids
# create a set of isoforms
self.create_isoforms()
# Use transcript ids if given
self.assign_transcript_ids()
# The number of exons in each isoform
self.num_parts_per_isoform = array([iso.num_parts for iso in self.isoforms])
def isoform_has_part(self, isoform, part):
"""
Return True if isoform has part, otherwise False.
"""
for iso_part in isoform.parts:
if iso_part.start == part.start and \
iso_part.end == part.end:
return True
return False
def get_const_parts(self):
"""
Return all of the gene's constitutive parts, i.e. regions that are shared across all isoforms.
"""
const_parts = []
for part in self.parts:
add_part = True
for isoform in self.isoforms:
if not self.isoform_has_part(isoform, part):
add_part = False
# if part not in isoform.parts:
# add_part = False
if add_part:
const_parts.append(part)
# if len(const_parts) == 0:
# raise Exception, "Gene %s has no constitutive parts!" %(str(self))
return const_parts
def get_alternative_parts(self):
"""
Return all of the gene's alternative parts, i.e. non-constitutive regions.
"""
const_parts = self.get_const_parts()
alternative_parts = []
for part in self.parts:
if part not in const_parts:
alternative_parts.append(part)
return alternative_parts
def get_rand_id(self, len):
rand_id = 'G' + "".join([pyrand.choice('abcdefghijklmnopqrstuvwxyz') for n in range(len)])
return rand_id
def get_parts_before(self, part):
"""
        Return all the parts before the given part in the gene.
"""
parts_before = []
for p in self.parts:
if p == None:
raise Exception, "Attempting to reference a None part in %s, (part = %s)" \
%(str(self), str(part))
if p.end < part.start:
parts_before.append(p)
else:
return parts_before
return parts_before
def get_part_number(self, part):
"""
Return a part's position (i.e. its number) in the list of parts (0-based).
"""
return self.parts.index(part)
def get_genomic_parts_crossed(self, start, end, read_len=None):
"""
Return all parts (by number!) that are crossed in the genomic interval [start, end],
not including the parts where start and end land.
If read_len is given, take that into account when considering whether a part was crossed.
"""
# find the part where the first coordinate is
start_part = self.get_part_by_coord(start)
end_part = self.get_part_by_coord(end)
##
## NEW: Reads that do not cross any parts might be
## intronic reads
##
if start_part == None or end_part == None:
return []
start_part_num = self.parts.index(start_part)
end_part_num = self.parts.index(end_part)
# find parts crossed in between start and end
parts_crossed = range(start_part_num + 1, end_part_num)
        # NOTE: read_len does not change the result here; the same set of
        # crossed parts is returned either way
        return parts_crossed
def get_part_by_label(self, part_label):
for part in self.parts:
if part_label == part.label:
return part
return None
def part_coords_to_genomic(self, part, part_start, part_end=None):
"""
Map the coordinates (start, end) inside part into their corresponding genomic coordinates.
The end coordinate is optional.
If the given part is the first part in the gene, then these two coordinate systems
are equivalent.
"""
# find parts before the given part and sum their coordinates
parts_before_len = sum([p.len for p in self.get_parts_before(part)])
genomic_start = parts_before_len + part_start
if part_end:
genomic_end = parts_before_len + part_end
return (genomic_start, genomic_end)
return genomic_start
def get_part_by_coord(self, genomic_start):
"""
Return the part that contains the given genomic start coordinate.
"""
for part in self.parts:
if part.start <= genomic_start and genomic_start <= part.end:
return part
return None
def genomic_coords_to_isoform(self, isoform, genomic_start, genomic_end):
"""
        Take genomic coordinates and map them onto the given isoform's coordinates.
"""
assert(isoform in self.isoforms)
# ensure that the parts the coordinates map to are in the isoform
start_part = self.get_part_by_coord(genomic_start)
end_part = self.get_part_by_coord(genomic_end)
if start_part == None or end_part == None:
return None, None
# find the isoform coordinates of the corresponding parts
isoform_start = isoform.part_coord_to_isoform(genomic_start)
isoform_end = isoform.part_coord_to_isoform(genomic_end)
return (isoform_start, isoform_end)
def create_parts(self, parts):
part_counter = 0
gene_parts = []
for part in parts:
gene_part = part
gene_part.gene = self
gene_parts.append(gene_part)
part_counter += 1
return gene_parts
def create_isoforms(self):
self.isoforms = []
for iso in self.isoform_desc:
isoform_parts = []
isoform_seq = ""
for part_label in iso:
# retrieve part
part = self.get_part_by_label(part_label)
if not part:
raise Exception, "Invalid description of isoforms: refers to undefined part %s, %s, gene: %s" \
%(part, part_label, self.label)
isoform_parts.append(part)
isoform_seq += part.seq
# make isoform with the given parts
isoform = Isoform(self, isoform_parts, seq=isoform_seq)
isoform.desc = iso
self.isoforms.append(isoform)
self.iso_lens.append(isoform.len)
self.iso_lens = array(self.iso_lens)
def assign_transcript_ids(self):
"""
Assign transcript IDs to isoforms.
"""
if self.transcript_ids != None:
if len(self.transcript_ids) != len(self.isoforms):
raise Exception, "Transcript IDs do not match number of isoforms."
for iso_num, iso in enumerate(self.isoforms):
curr_iso = self.isoforms[iso_num]
curr_iso.label = self.transcript_ids[iso_num]
def set_sequence(self, exon_id, seq):
"""
Set the sequence of the passed in exons to be the given sequence.
"""
        raise Exception, "Unimplemented method."
def align_read_pair_with_cigar(self, left_cigar, genomic_left_read_start,
genomic_left_read_end, right_cigar,
genomic_right_read_start,
genomic_right_read_end, read_len,
overhang=1):
alignment = []
iso_frag_lens = []
left = self.align_read_to_isoforms_with_cigar(
left_cigar, genomic_left_read_start, genomic_left_read_end,
read_len, overhang)
right = self.align_read_to_isoforms_with_cigar(
right_cigar, genomic_right_read_start, genomic_right_read_end,
read_len, overhang)
for lal, lco, ral, rco in zip(left[0], left[1], right[0], right[1]):
if lal and ral:
alignment.append(1)
iso_frag_lens.append(rco[1]-lco[0]+1)
else:
alignment.append(0)
iso_frag_lens.append(-Inf)
return (alignment, iso_frag_lens)
# def align_read_pair(self, genomic_left_read_start, genomic_left_read_end,
# genomic_right_read_start, genomic_right_read_end, overhang=1,
# read_len=36):
# """
# Align a paired-end read to all of the gene's isoforms.
# Return an alignment binary vector of length K, where K is the number of isoforms, as well as
# a vector with the fragment lengths that correspond to each alignment to an isoform.
# When a read does not align to a particular isoform, the fragment length for that alignment is
# denoted with -Inf.
# """
# # align each of the pairs independently to all isoforms, and then compute the fragment
# # lengths that correspond to each alignment
# alignment = []
# iso_frag_lens = []
# # print "Aligning: ", (genomic_left_read_start, genomic_left_read_end), \
# # " - ", (genomic_right_read_start, genomic_right_read_end)
# # align left read
# (left_alignment, left_isoform_coords) = self.align_read_to_isoforms(genomic_left_read_start, genomic_left_read_end,
# overhang=overhang, read_len=read_len)
# # align right read
# (right_alignment, right_isoform_coords) = self.align_read_to_isoforms(genomic_right_read_start,
# genomic_right_read_end,
# overhang=overhang, read_len=read_len)
# num_isoforms = len(self.isoforms)
# for n in range(num_isoforms):
# # check that both reads align to the isoform with no overhang violation
# if not (left_alignment[n] == right_alignment[n] and right_alignment[n] != 0):
# alignment.append(0)
# iso_frag_lens.append(-Inf)
# continue
# # else:
# # print "Not both reads align to the same isoform"
# # compute fragment length for each isoform, which is the isoform start coordinate of
# # the right read minus the isoform start coordinate of the left read
# frag_len = right_isoform_coords[n][1] - left_isoform_coords[n][0] + 1
# if frag_len < 0:
# # negative fragment length
# print "Warning: Negative fragment length during alignment of ", genomic_left_read_start, \
# genomic_right_read_start, " to isoform: ", self.isoforms[n]
# alignment.append(0)
# iso_frag_lens.append(-Inf)
# continue
# # both reads align with no overhang violations and the fragment length is reasonable
# alignment.append(1)
# iso_frag_lens.append(frag_len)
# return (alignment, iso_frag_lens)
def align_reads_to_isoforms(self, read_genomic_coords, overhang=1, read_len=36):
alignments = []
isoforms_coords = []
for read_coords in read_genomic_coords:
genomic_read_start, genomic_read_end = read_coords
alignment, isoform_coords = self.align_read_to_isoforms(genomic_read_start, genomic_read_end,
overhang=overhang, read_len=read_len)
alignments.append(alignment)
isoforms_coords.append(isoform_coords)
return (array(alignments), isoforms_coords)
def align_read_to_isoforms_with_cigar(self, cigar, genomic_read_start,
genomic_read_end, read_len, overhang_len):
"""
Align a single-end read to all of the gene's isoforms.
        Use the CIGAR string of the read to determine whether the
        read's splicing structure is consistent with each isoform.
"""
alignment = []
isoform_coords = []
for isoform in self.isoforms:
iso_read_start, iso_read_end = self.genomic_coords_to_isoform(isoform,
genomic_read_start,
genomic_read_end)
isocigar = isoform.get_local_cigar(genomic_read_start, read_len)
# Check that read is consistent with isoform and that the overhang
# constraint is met
if (isocigar and isocigar == cigar) and \
isoform.cigar_overhang_met(isocigar, overhang_len):
alignment.append(1)
isoform_coords.append((iso_read_start, iso_read_end))
else:
alignment.append(0)
isoform_coords.append(None)
return (alignment, isoform_coords)
# def align_read_to_isoforms(self, genomic_read_start, genomic_read_end, overhang=1, read_len=36):
# """
# Align a single-end read to all of the gene's isoforms.
# Return an alignment as well as a set of isoform coordinates, for each isoform, corresponding
# to the places where the read aligned.
# When the read violates overhang or the read doesn't align to a particular
# isoform, the coordinate is set to None.
# """
# alignment = []
# isoform_coords = []
# genomic_parts_crossed = self.get_genomic_parts_crossed(genomic_read_start,
# genomic_read_end,
# read_len=read_len)
# if len(genomic_parts_crossed) == 0:
# print "zero genomic parts crossed"
# ##
# ## NEW: If no genomic parts are crossed, must be intronic read
# ##
# # if len(genomic_parts_crossed) == 0:
# # alignment = [0] * len(self.isoforms)
# # isoform_coords = [None] * len(self.isoforms)
# # return (alignment, isoform_coords)
# for isoform in self.isoforms:
# # check that parts aligned to in genomic coordinates exist
# # in the current isoform
# iso_read_start, iso_read_end = self.genomic_coords_to_isoform(isoform,
# genomic_read_start,
# genomic_read_end)
# if iso_read_start == None or iso_read_end == None:
# alignment.append(0)
# isoform_coords.append(None)
# continue
# # genomic parts have matching parts in isoform. Now check that
# # that they cross the same junctions
# iso_parts_crossed = isoform.get_isoform_parts_crossed(iso_read_start, iso_read_end)
# if iso_parts_crossed != genomic_parts_crossed:
# alignment.append(0)
# isoform_coords.append(None)
# continue
# # check that overhang violation is met on outer parts crossed (as long as
# # parts that are crossed in between are greater or equal to overhang constraint,
# # no need to check those)
# if overhang > 1:
# start_part, start_part_coord = isoform.get_part_by_coord(iso_read_start)
# if (start_part.end - (start_part_coord + start_part.start) + 1) < overhang:
# alignment.append(0)
# isoform_coords.append(None)
# continue
# end_part, end_part_coord = isoform.get_part_by_coord(iso_read_end)
# if ((end_part_coord + end_part.start) - end_part.start) + 1 < overhang:
# alignment.append(0)
# isoform_coords.append(None)
# continue
# # overhang is met and read aligns to the isoform
# alignment.append(1)
# # register coordinates
# isoform_coords.append((iso_read_start, iso_read_end))
# return (alignment, isoform_coords)
def align_read(self, genomic_read_start, genomic_read_end, overhang=1, read_len=36):
"""
Align a single-end read to all of the gene's isoforms.
Return an alignment binary vector of length K, where K is the number of isoforms.
The ith position in this vector has a 1 if the read aligns to the given isoform,
and 0 otherwise.
A read is of the form:
(genomic_start_coord, genomic_end_coord)
"""
alignment = []
# align all the reads to isoforms and return an alignment back, as well as the set of
# genomic coordinates for each isoform that the read aligns to.
alignment, aligned_genomic_coords = self.align_read_to_isoforms(genomic_read_start, genomic_read_end,
overhang=overhang)
# get the parts that are crossed in genomic coordinates space, taking into account
# the read's length
category = None
two_iso_alignment = None
# Deal with the two-isoform special case: categorize reads into NI, NE, NB
if len(self.isoforms) == 2:
if alignment == [1, 0]:
# NI
two_iso_alignment = [1, 0, 0]
# Check if it aligns to upstream or downstream inclusion junction
# We know that overhang violation hasn't been made here based on the alignment
# first convert the genomic coordinates of read to the first isoform's coordinates
isoform1 = self.isoforms[0]
# find isoform coordinate of genomic read start
iso1_read_start, c1 = self.genomic_coords_to_isoform(isoform1, genomic_read_start,
genomic_read_start)
# find isoform coordinate of genomic read end
iso1_read_end, c2 = self.genomic_coords_to_isoform(isoform1, genomic_read_end,
genomic_read_end)
# find which parts these coordinates land in
iso1_read_start_part, c1 = isoform1.get_part_by_coord(iso1_read_start)
iso1_read_end_part, c2 = isoform1.get_part_by_coord(iso1_read_end)
# if the read starts in the first part of the isoform and
# ends in the second part of the isoform, then it's an upstream
# inclusion junction read
if iso1_read_start_part == isoform1.parts[0] and iso1_read_end_part == isoform1.parts[1]:
category = 'upincjxn'
elif iso1_read_start_part == isoform1.parts[1] and iso1_read_end_part == isoform1.parts[2]:
# if the read starts in the second part of the isoform and
# ends in the third, then it's a downstream inclusion
# junction read
category = 'dnincjxn'
elif iso1_read_start_part == isoform1.parts[1] and iso1_read_end_part == isoform1.parts[1]:
# if the read starts and ends in the skipped exon body,
# then it's a body read
category = 'body'
else:
# If the read is not in one of those categories, the isoform must have more than three parts
assert(len(isoform1.parts) > 3)
# if the read doesn't fall into either of these categories, it can't possibly be
# an inclusion read
# if category == None:
# raise Exception, "Incoherent inclusion read: not upincjxn, dnincjxn, or body! %s" \
# %(str(iso1_read_start) + ' - ' + str(iso1_read_end))
elif alignment == [0, 1]:
# NE
two_iso_alignment = [0, 1, 0]
elif alignment == [1, 1]:
# NB
two_iso_alignment = [0, 0, 1]
else:
# Overhang violation
two_iso_alignment = [0, 0, 0]
return (two_iso_alignment, category)
return (alignment, category)
def align_paired_end_reads(self, reads, overhang=1):
"""
Take a set of paired-end reads parameterized by their genomic coordinates
and align them to all of the gene's isoforms.
For each read, return a pair where the first element is the alignment to all the
isoforms (a binary vector, with 1 in the ith position of the read aligns to the
ith isoform and 0 otherwise) and the second element is the length of the
fragment lengths that correspond to each alignment (-Inf if the read does not
align to the given isoform.)
"""
aligned_reads = []
for read in reads:
# align read to all of the isoforms
(alignment, isoform_coords) = self.align_read_pair(read[0], read[1], read[2], read[3], overhang=overhang)
frag_lens = [c2 - c1 + 1 for c1, c2 in isoform_coords]
aligned_reads.append(array([alignment, frag_lens]))
return aligned_reads
def align(self, seq, overhang=None):
"""
Given a short sequence, return a list of size len(self.isoforms) that says
for each isoform if the sequence is a substring of it (denoted 1) or not
(denoted 0).
"""
# if no overhang constraints are given, align without regard for overhang violations
alignment = []
if not overhang:
for iso in self.isoforms:
alignment.append(1 if seq in iso.seq else 0)
return alignment
category = None
# take overhang into account
for iso in self.isoforms:
# find read starting position in the isoform
read_start = iso.seq.find(seq)
if read_start == -1:
alignment.append(0)
continue
split_iso = iso.seq[read_start:]
prev_part = 0
oh_viol = False
for part in iso.parts:
# find beginning of next part
next_part = split_iso.find(part.seq)
if next_part == -1:
continue
# check that there is no overhang violation
part_segment = seq[prev_part:next_part]
remain_seq = seq[next_part:]
prev_part = next_part
                if len(part_segment) != 0 and len(part_segment) < overhang:
oh_viol = True
alignment.append(0)
break
if not oh_viol and remain_seq != '':
                if len(remain_seq) < overhang:
oh_viol = True
alignment.append(0)
if not oh_viol:
alignment.append(1)
# Deal with the two isoform case. Classify each read into a category
# and then return the counts [ni, ne, nb].
if len(self.isoforms) == 2:
if re.match("^0000.*1111.*$", seq) != None:
category = 'upincjxn'
elif re.match("^11111*$", seq) != None:
category = 'body'
elif re.match("^11111*22222*$", seq) != None:
category = 'dnincjxn'
if alignment == [1, 0]:
return ([1, 0, 0], category)
elif alignment == [0, 1]:
return ([0, 1, 0], category)
elif alignment == [1, 1]:
return ([0, 0, 1], category)
elif alignment == [0, 0]:
# overhang violation
return ([0, 0, 0], category)
return (alignment, category)
def get_iso(self, isoform_num):
return self.isoforms[isoform_num]
def avg_iso_len(self):
"""
Return the gene's average isoform length.
"""
        iso_lens = [iso.len for iso in self.isoforms]
return mean(iso_lens)
def __str__(self):
return "gene_id: %s\nisoforms: %s" %(self.label, self.isoforms)
def __repr__(self):
return self.__str__()

class Isoform:
    def __init__(self, gene, parts,
                 seq=None,
                 label=None):
        """
        Build an isoform given an isoform description.
        """
        self.gene = gene
        # Ordered list of isoform parts (exons and introns)
        self.parts = parts
        self.num_parts = len(parts)
        self.len = sum([part.len for part in parts])
        self.seq = seq
        self.label = label
        # The genomic coordinates of the isoform are defined as the start
        # coordinate of the first part and the end coordinate of the last part
        first_part = self.parts[0]
        last_part = self.parts[-1]
        self.genomic_start = first_part.start
        self.genomic_end = last_part.end

    def get_parts_before(self, part):
        """
        Return all the parts that come before the given part in the isoform.
        """
        parts_before = []
        for p in self.parts:
            if p.end < part.start:
                parts_before.append(p)
            else:
                return parts_before
        return parts_before

    def get_part_by_coord(self, start_coord):
        """
        Get the part that the *given isoform start coordinate* lands in, and
        the corresponding part-based coordinate.
        """
        isoform_interval_start = 0
        isoform_interval_end = 0
        for part in self.parts:
            # Walk the parts in isoform coordinate space and see in which
            # part the given coordinate falls
            isoform_interval_end += part.len - 1
            # Check whether the part contains the coordinate
            if isoform_interval_start <= start_coord <= isoform_interval_end:
                # The part-based coordinate is start_coord minus the summed
                # lengths of all the previous parts
                prev_parts_sum = sum([p.len for p in self.get_parts_before(part)])
                part_start = start_coord - prev_parts_sum
                return part, part_start
            # Add one to move to the next part
            isoform_interval_end += 1
            isoform_interval_start = isoform_interval_end
        return (None, None)
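
    # Worked example (hypothetical isoform with two 100-nt exons): isoform
    # coordinates 0-99 fall in the first exon and 100-199 in the second, so
    #
    #   part, offset = iso.get_part_by_coord(120)
    #
    # returns the second exon and offset 20 (120 minus the 100 nt that come
    # before it).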

    def get_isoform_parts_crossed(self, start, end):
        """
        Return all parts (by index!) that are crossed in the isoform interval
        [start, end], not including the parts where start and end land.
        """
        # Find the parts where the interval's endpoints fall
        start_part, s1 = self.get_part_by_coord(start)
        end_part, s2 = self.get_part_by_coord(end)
        start_part_num = self.parts.index(start_part)
        end_part_num = self.parts.index(end_part)
        # Parts strictly between the start part and the end part
        return range(start_part_num + 1, end_part_num)
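
    # Example (hypothetical coordinates): if start lands in part 0 and end
    # lands in part 3, the parts strictly in between are crossed:
    #
    #   iso.get_isoform_parts_crossed(start, end)   # -> [1, 2]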

    def cigar_overhang_met(self, cigar, overhang_len):
        """
        Check that the overhang constraint is met in each
        match condition of the read.
        """
        overhang_met = True
        for c in cigar:
            # If it's a match (M) part of the CIGAR and the match length is
            # less than the overhang constraint, the constraint is violated
            if (c[0] == 0) and (c[1] < overhang_len):
                return False
        return overhang_met
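
    # Example: a junction read with CIGAR [(0, 5), (3, 100), (0, 5)]
    # (5M 100N 5M) meets an overhang constraint of 4 but violates one of 6:
    #
    #   iso.cigar_overhang_met([(0, 5), (3, 100), (0, 5)], 4)   # -> True
    #   iso.cigar_overhang_met([(0, 5), (3, 100), (0, 5)], 6)   # -> False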

    def get_local_cigar(self, start, read_len):
        """
        Calculate a CIGAR list for a hypothetical read at a given start
        position, with a given read length.
        """
        # If the read starts before or after the isoform, it does not fit
        if start < self.parts[0].start or self.parts[-1].end < start:
            return None
        # Look for the exon where the read starts
        found = None
        for i, p in enumerate(self.parts):
            if p.start <= start <= p.end:
                found = i
                break
        if found is None:
            return None
        # Build the CIGAR as (operation, length) pairs, where 0 is a
        # match (M) and 3 is a skipped region/intron (N)
        cigar = []
        rl = read_len
        st = start
        for i in range(found, len(self.parts)):
            # Does the rest of the read fit on this exon?
            if rl <= self.parts[i].end - st + 1:
                cigar.append((0, rl))
                return cigar
            # The next exon is needed as well
            else:
                # Is there a next exon?
                if i + 1 == len(self.parts):
                    return None
                cigar.append((0, self.parts[i].end - st + 1))
                cigar.append((3, self.parts[i+1].start - self.parts[i].end - 1))
                rl = rl - (self.parts[i].end - st + 1)
                st = self.parts[i+1].start
        return cigar
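
    # Worked example (hypothetical isoform with exons at genomic 0-99 and
    # 200-299): a 10-nt read starting at genomic position 95 spans the
    # junction, so
    #
    #   iso.get_local_cigar(95, 10)   # -> [(0, 5), (3, 100), (0, 5)]
    #
    # i.e. 5 matched bases, a 100-nt intron skip, then 5 more matched bases.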

    def part_coord_to_isoform(self, part_start):
        """
        Get the isoform coordinate that the *given genomic part_start
        coordinate* lands in.
        """
        isoform_interval_start = 0
        isoform_coord = None
        for part in self.parts:
            if part.contains(part_start, part_start):
                isoform_coord = isoform_interval_start + (part_start - part.start)
                return isoform_coord
            isoform_interval_start += part.len
        return isoform_coord

    def isoform_coords_to_genomic(self, isoform_start, isoform_end):
        """
        Map isoform coordinates to genomic coordinates.
        """
        # Get the part that each coordinate point lands in
        start_part, start_part_coord = self.get_part_by_coord(isoform_start)
        end_part, end_part_coord = self.get_part_by_coord(isoform_end)
        # Retrieve the corresponding genomic coordinates
        genomic_start = self.gene.part_coords_to_genomic(start_part, start_part_coord)
        genomic_end = self.gene.part_coords_to_genomic(end_part, end_part_coord)
        return (genomic_start, genomic_end)

    def __repr__(self):
        parts_str = str([p.label for p in self.parts])
        return "Isoform(gene = %s, g_start = %d, g_end = %d, len = %d,\n parts = %s)" \
               %(self.gene.label, self.genomic_start, self.genomic_end, self.len, parts_str)

def pretty(d, indent=0):
    for key, value in d.iteritems():
        print ' ' * indent + str(key)
        if isinstance(value, dict):
            pretty(value, indent+1)
        else:
            print ' ' * (indent+1) + str(value)

def printTree(tree, depth=0):
    if tree is None or not isinstance(tree, dict):
        print "\t" * depth, tree
    else:
        for key, val in tree.items():
            print "\t" * depth, key
            printTree(val, depth+1)

def print_gene_hierarchy(gene_hierarchy):
    pretty(gene_hierarchy)
    # pp = pprint.PrettyPrinter(indent=4)
    # pp.pprint(gene_hierarchy)

def load_genes_from_gff(gff_filename,
                        include_introns=False,
                        reverse_recs=False,
                        suppress_warnings=False):
    """
    Load all records for a set of genes from a given GFF file.
    Parse each gene into a Gene object.
    """
    gff_db = GFFDatabase(gff_filename,
                         include_introns=include_introns,
                         reverse_recs=reverse_recs)
    # Dictionary mapping gene IDs to their Gene object and record hierarchy
    gff_genes = {}
    for gene in gff_db.genes:
        gene_records, gene_hierarchy = gff_db.get_genes_records([gene.get_id()])
        # Record the gene's GFF record
        gene_label = gene.get_id()
        if gene_label not in gene_hierarchy:
            if not suppress_warnings:
                print "Skipping gene %s..." %(gene_label)
            continue
        gene_hierarchy[gene_label]['gene'] = gene
        # Make a gene object out of the GFF records
        gene_obj = make_gene_from_gff_records(gene_label,
                                              gene_hierarchy[gene_label],
                                              gene_records)
        if gene_obj is None:
            if not suppress_warnings:
                print "Cannot make gene out of %s" %(gene_label)
            continue
        gff_genes[gene.get_id()] = {'gene_object': gene_obj,
                                    'hierarchy': gene_hierarchy}
    num_genes = len(gff_genes)
    if not suppress_warnings:
        print "Loaded %d genes" %(num_genes)
    return gff_genes
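
# Example usage (sketch; the GFF filename and the chrom attribute access are
# illustrative):
#
#   gff_genes = load_genes_from_gff("events.gff3")
#   for gene_id, entry in gff_genes.iteritems():
#       gene = entry['gene_object']
#       print gene_id, gene.chrom
#
# Each value holds the parsed Gene object and the raw GFF record hierarchy.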

def make_gene_from_gff_records(gene_label,
                               gene_hierarchy,
                               gene_records):
    """
    Make a gene from a gene hierarchy.
    """
    mRNAs = gene_hierarchy['mRNAs']
    # Each transcript is a set of exons
    transcripts = []
    isoform_desc = []
    chrom = None
    strand = "NA"
    # Iterate through mRNAs in the order in which they were given in the
    # GFF file
    transcript_ids = [rec.get_id() for rec in gene_records \
                      if (rec.type == "mRNA" or rec.type == "transcript")]
    if len(transcript_ids) == 0:
        raise Exception, "Error: %s has no transcripts..." \
              %(gene_label)
    num_transcripts_with_exons = 0
    used_transcript_ids = []
    for transcript_id in transcript_ids:
        transcript_info = mRNAs[transcript_id]
        transcript_rec = transcript_info['record']
        chrom = transcript_rec.seqid
        strand = transcript_rec.strand
        transcript_exons = transcript_info['exons']
        exons = []
        if len(transcript_exons) == 0:
            #print "%s has no exons" %(transcript_id)
            continue
        # Record how many transcripts we have with exon children
        # (i.e., usable transcripts)
        num_transcripts_with_exons += 1
        for exon_id, exon_info in transcript_exons.iteritems():
            exon_rec = exon_info['record']
            exon = Exon(exon_rec.start, exon_rec.end,
                        from_gff_record={'record': exon_rec,
                                         'parent': transcript_rec})
            exons.append(exon)
        # Sort exons by their start coordinate
        exons = sorted(exons, key=lambda e: e.start)
        # Exons that make up a transcript
        transcripts.append(exons)
        # Get exon labels to make the transcript's description
        exon_labels = [exon.label for exon in exons]
        # Delimiter for internal representation of isoforms
        #iso_delim = "_"
        # The transcript's description
        #for label in exon_labels:
        #    if iso_delim in label:
        #        raise Exception, "Cannot use %s in naming exons (%s) in GFF." \
        #              %(iso_delim, label)
        #desc = iso_delim.join(exon_labels)
        isoform_desc.append(exon_labels)
        # Record transcript ids that are not skipped
        used_transcript_ids.append(transcript_id)
    #if num_transcripts_with_exons < 2:
    #    print "WARNING: %s does not have at least two mRNA/transcript entries " \
    #          "with exons. Skipping over..." %(gene_label)
    #    return None
    # Compile all exons used in all transcripts
    all_exons = []
    for transcript in transcripts:
        all_exons.extend(transcript)
    # Prefix chromosome with "chr" if it does not have it already
    #if not chrom.startswith("chr"):
    #    chrom = "chr%s" %(chrom)
    gene = Gene(isoform_desc, all_exons,
                label=gene_label,
                chrom=chrom,
                strand=strand,
                transcript_ids=used_transcript_ids)
    return gene

def make_gene(parts_lens, isoforms, chrom=None):
    """
    Make a gene out of the given part lengths, where isoforms is a list of
    lists of numbers, each inner list describing one isoform (the numbers
    are *one-based* indices into the exon parts in parts_lens).
    """
    parts = []
    start_genomic = 0
    part_num = 1
    for part_len in parts_lens:
        end_genomic = start_genomic + part_len - 1
        exon = Exon(start_genomic, end_genomic, label=str(part_num))
        parts.append(exon)
        part_num += 1
        start_genomic = end_genomic + 1
    isoform_desc = []
    for iso in isoforms:
        desc = "_".join([str(iso_name) for iso_name in iso])
        isoform_desc.append(desc)
    gene = Gene(isoform_desc, parts, chrom=chrom)
    return gene
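
# Example: a three-part gene with 100-nt flanking exons and a 50-nt middle
# exon, with an inclusion (1-2-3) and a skipping (1-3) isoform:
#
#   gene = make_gene([100, 50, 100], [[1, 2, 3], [1, 3]])
#
# The parts get genomic coordinates 0-99, 100-149, and 150-249.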

def se_event_to_gene(up_len, se_len, dn_len, chrom,
                     label=None):
    """
    Parse a skipped-exon (SE) event into a gene structure.
    """
    exon1_start = 0
    exon1_end = up_len - 1
    exon2_start = exon1_end + 1
    exon2_end = exon2_start + (se_len - 1)
    exon3_start = exon2_end + 1
    exon3_end = exon3_start + (dn_len - 1)
    up_exon = Exon(exon1_start, exon1_end, label='A')
    se_exon = Exon(exon2_start, exon2_end, label='B')
    dn_exon = Exon(exon3_start, exon3_end, label='C')
    parts = [up_exon, se_exon, dn_exon]
    gene = Gene([['A', 'B', 'C'], ['A', 'C']], parts, label=label,
                chrom=chrom)
    return gene
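
# Example: se_event_to_gene(100, 50, 100, "chr1") places exon A at 0-99,
# the skipped exon B at 100-149, and exon C at 150-249, with isoforms
# A-B-C (inclusion) and A-C (exclusion).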

def tandem_utr_event_to_gene(core_len, ext_len, chrom, label=None):
    """
    Parse a tandem UTR event into a gene structure.
    """
    exon1_start = 0
    exon1_end = core_len - 1
    exon2_start = exon1_end + 1
    exon2_end = exon2_start + (ext_len - 1)
    core_exon = Exon(exon1_start, exon1_end, label='TandemUTRCore')
    ext_exon = Exon(exon2_start, exon2_end, label='TandemUTRExt')
    parts = [core_exon, ext_exon]
    gene = Gene([['TandemUTRCore', 'TandemUTRExt'], ['TandemUTRCore']], parts,
                label=label, chrom=chrom)
    return gene

def make_proximal_distal_exon_pair(proximal_exons, distal_exons):
    """
    Make one large distal exon out of a set of smaller distal exons.

    NOTE: unimplemented stub.
    """
    return

def afe_ale_event_to_gene(proximal_exons, distal_exons, event_type,
                          chrom, read_len=None, overhang_len=None,
                          label=None):
    """
    Parse an AFE/ALE event into a gene.
    """
    # Extend each exon by the junction that can be made between it
    # and the gene body, based on read length and overhang
    if read_len is not None and overhang_len is not None:
        #num_junction_positions = read_len - (2 * (overhang_len - 1))
        num_junction_positions = read_len
    else:
        num_junction_positions = 0
    # Distal exon -- farther away from the gene body
    distal_exon_start = 0
    sum_distal_exons_lens = sum([distal_exon['len'] for distal_exon \
                                 in distal_exons])
    sum_distal_exons_lens += num_junction_positions
    distal_exon_end = sum_distal_exons_lens - 1
    distal_exon = Exon(distal_exon_start, distal_exon_end,
                       label='%sDistal' %(event_type))
    # Proximal exon -- closer to the gene body
    proximal_exon_start = distal_exon_end + 1
    sum_proximal_exons_lens = sum([proximal_exon['len'] for proximal_exon \
                                   in proximal_exons])
    sum_proximal_exons_lens += num_junction_positions
    proximal_exon_end = proximal_exon_start + (sum_proximal_exons_lens - 1)
    proximal_exon = Exon(proximal_exon_start, proximal_exon_end,
                         label='%sProximal' %(event_type))
    parts = None
    if event_type == 'AFE':
        parts = [distal_exon, proximal_exon]
    elif event_type == 'ALE':
        # NOTE: assumed mirror-image ordering for ALE events; the code as
        # found raised an exception for anything other than AFE
        parts = [proximal_exon, distal_exon]
    else:
        raise Exception, "Parsing wrong event type, %s" %(event_type)
    # Make it so the proximal isoform is always first
    gene = Gene(['%sProximal' %(event_type), '%sDistal' %(event_type)],
                parts, chrom=chrom, label=label)
    return gene

if __name__ == '__main__':
    pass