code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _32bit_oper(value)
if indirect:
output.append('ld bc, %i' % I)
output.append('call __PISTORE32')
REQUIRES.add('pistore32.asm')
return output
# direct store
output.append('ld bc, %i' % I)
output.append('call __PSTORE32')
REQUIRES.add('pstore32.asm')
return output
|
def _pstore32(ins)
|
Stores the 2nd parameter at stack pointer (SP) + X, where
X is the 1st parameter.
The 1st operand must be a SIGNED integer.
| 6.403168 | 6.706625 | 0.954753 |
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _f16_oper(value)
if indirect:
output.append('ld bc, %i' % I)
output.append('call __PISTORE32')
REQUIRES.add('pistore32.asm')
return output
# direct store
output.append('ld bc, %i' % I)
output.append('call __PSTORE32')
REQUIRES.add('pstore32.asm')
return output
|
def _pstoref16(ins)
|
Stores the 2nd parameter at stack pointer (SP) + X, where
X is the 1st parameter.
The 1st operand must be a SIGNED integer.
| 7.046725 | 7.219427 | 0.976078 |
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _float_oper(value)
if indirect:
output.append('ld hl, %i' % I)
output.append('call __PISTOREF')
REQUIRES.add('storef.asm')
return output
# direct store
output.append('ld hl, %i' % I)
output.append('call __PSTOREF')
REQUIRES.add('pstoref.asm')
return output
|
def _pstoref(ins)
|
Stores the 2nd parameter at stack pointer (SP) + X, where
X is the 1st parameter.
The 1st operand must be a SIGNED integer.
| 7.29921 | 7.471739 | 0.976909 |
output = []
temporal = False
# 2nd operand first, because must go into the stack
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
if value[0] == '_':
output.append('ld de, (%s)' % value)
if indirect:
output.append('call __LOAD_DE_DE')
REQUIRES.add('lddede.asm')
elif value[0] == '#':
output.append('ld de, %s' % value[1:])
else:
output.append('pop de')
temporal = value[0] != '$'
if indirect:
output.append('call __LOAD_DE_DE')
REQUIRES.add('lddede.asm')
# Now 1st operand
value = ins.quad[1]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
I = int(value)
if I >= 0:
I += 4 # Return Address + "push IX"
output.append('ld bc, %i' % I)
if not temporal:
if indirect:
output.append('call __PISTORE_STR')
REQUIRES.add('storestr.asm')
else:
output.append('call __PSTORE_STR')
REQUIRES.add('pstorestr.asm')
else:
if indirect:
output.append('call __PISTORE_STR2')
REQUIRES.add('storestr2.asm')
else:
output.append('call __PSTORE_STR2')
REQUIRES.add('pstorestr2.asm')
return output
|
def _pstorestr(ins)
|
Stores the 2nd parameter at stack pointer (SP) + X, where
X is the 1st parameter.
The 1st operand must be a SIGNED integer.
Note: This procedure proceeds like _pstore16, since STRINGS are 16bit pointers.
| 3.413299 | 3.405423 | 1.002313 |
assert type_ is None or isinstance(type_, SymbolTYPE)
if func is not None: # Try constant-folding
if is_number(operand): # e.g. ABS(-5)
return SymbolNUMBER(func(operand.value), lineno=lineno)
elif is_string(operand): # e.g. LEN("a")
return SymbolSTRING(func(operand.text), lineno=lineno)
if type_ is None:
type_ = operand.type_
if operator == 'MINUS':
if not type_.is_signed:
type_ = type_.to_signed()
operand = SymbolTYPECAST.make_node(type_, operand, lineno)
elif operator == 'NOT':
type_ = TYPE.ubyte
return cls(operator, operand, lineno, type_)
|
def make_node(cls, lineno, operator, operand, func=None, type_=None)
|
Creates a node for a unary operation. E.g. -x or LEN(a$)
Parameters:
-func: lambda function used on constant folding when possible
-type_: the resulting type (by default, the same as the argument).
For example, for LEN (str$), result type is 'u16'
and arg type is 'string'
| 4.567444 | 4.342848 | 1.051716 |
if type_ in cls.unsigned:
return {TYPE.ubyte: TYPE.byte_,
TYPE.uinteger: TYPE.integer,
TYPE.ulong: TYPE.long_}[type_]
if type_ in cls.decimals or type_ in cls.signed:
return type_
return cls.unknown
|
def to_signed(cls, type_)
|
Return signed type or equivalent
| 5.29723 | 4.96972 | 1.065901 |
NAME_TYPES = {cls.TYPE_NAMES[x]: x for x in cls.TYPE_NAMES}
return NAME_TYPES.get(typename, None)
|
def to_type(cls, typename)
|
Converts a type name to its type ID. Returns None on error.
| 4.671248 | 4.186557 | 1.115773 |
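The to_type row above builds a reverse lookup (NAME_TYPES) from the class-level TYPE_NAMES mapping and falls back to None for unknown names. A minimal standalone sketch of that pattern (the TYPE_NAMES contents here are hypothetical):

```python
# Hypothetical TYPE_NAMES contents; the real mapping lives on the TYPE class.
TYPE_NAMES = {1: 'byte', 2: 'integer', 3: 'long'}

# Invert id -> name into name -> id, exactly as the method above does.
NAME_TYPES = {name: type_id for type_id, name in TYPE_NAMES.items()}

assert NAME_TYPES.get('integer') == 2
assert NAME_TYPES.get('bogus') is None   # unknown names return None
```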
op = float(op)
negative = op < 0
if negative:
op = -op
DE = int(op)
HL = int((op - DE) * 2**16) & 0xFFFF
DE &= 0xFFFF
if negative: # Do C2
DE ^= 0xFFFF
HL ^= 0xFFFF
DEHL = ((DE << 16) | HL) + 1
HL = DEHL & 0xFFFF
DE = (DEHL >> 16) & 0xFFFF
return (DE, HL)
|
def f16(op)
|
Returns a floating point operand converted to a 32 bit unsigned int.
Negative numbers are returned in 2's complement.
The result is returned as a tuple (DE, HL) => High16 (integer part), Low16 (decimal part)
| 4.110703 | 3.517698 | 1.168577 |
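As a sanity check, the conversion above can be reproduced standalone. This is a hedged sketch (hypothetical helper name) of the 16.16 encoding that f16 produces:

```python
def to_f16(op):
    """Encode a float as a 16.16 fixed point (DE, HL) pair, as f16() above does."""
    op = float(op)
    negative = op < 0
    op = abs(op)
    de = int(op)                              # integer part
    hl = int((op - de) * 2 ** 16) & 0xFFFF    # fractional part
    de &= 0xFFFF
    if negative:                              # two's complement of the 32 bit value
        dehl = (((de << 16) | hl) ^ 0xFFFFFFFF) + 1
        de, hl = (dehl >> 16) & 0xFFFF, dehl & 0xFFFF
    return de, hl

assert to_f16(1.5) == (1, 0x8000)
assert to_f16(-1.0) == (0xFFFF, 0x0000)
```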
ins.quad = [x for x in ins.quad]
for i in range(2, len(ins.quad)):
if is_float(ins.quad[i]):
de, hl = f16(ins.quad[i])
ins.quad[i] = str((de << 16) | hl)
ins.quad = tuple(ins.quad)
return ins
|
def _f16_to_32bit(ins)
|
If any of the operands within the ins(truction) are numeric,
convert them to their 32bit representation, otherwise leave them
as they are.
| 4.084955 | 4.042381 | 1.010532 |
op1, op2 = tuple(ins.quad[2:])
if _f_ops(op1, op2) is not None:
op1, op2 = _f_ops(op1, op2)
if op2 == 1: # A * 1 => A
output = _f16_oper(op1)
output.append('push de')
output.append('push hl')
return output
if op2 == -1:
return _neg32(ins)
output = _f16_oper(op1)
if op2 == 0:
output.append('ld hl, 0')
output.append('ld e, h')
output.append('ld d, l')
output.append('push de')
output.append('push hl')
return output
output = _f16_oper(op1, str(op2))
output.append('call __MULF16')
output.append('push de')
output.append('push hl')
REQUIRES.add('mulf16.asm')
return output
|
def _mulf16(ins)
|
Multiplies 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.
| 3.757456 | 3.779647 | 0.994129 |
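The fast paths above (multiply by 1, -1 or 0) avoid the runtime call; for the general case, __MULF16 effectively computes a 16.16 fixed-point product. A minimal Python sketch of that arithmetic (hypothetical helpers, not part of the compiler):

```python
SCALE = 1 << 16   # 16.16 fixed point

def to_fixed(x):
    return int(round(x * SCALE))

def from_fixed(v):
    return v / SCALE

def mulf16(a, b):
    # 32.32 intermediate product, then drop the extra 16 fractional bits
    return (a * b) >> 16

product = mulf16(to_fixed(1.5), to_fixed(2.25))
assert abs(from_fixed(product) - 3.375) < 1e-4
```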
op1, op2 = tuple(ins.quad[2:])
if is_float(op2):
if float(op2) == 1:
output = _f16_oper(op1)
output.append('push de')
output.append('push hl')
return output
if float(op2) == -1:
return _negf(ins)
rev = not is_float(op1) and op1[0] != 't' and op2[0] == 't'
output = _f16_oper(op1, op2, reversed=rev)
output.append('call __DIVF16')
output.append('push de')
output.append('push hl')
REQUIRES.add('divf16.asm')
return output
|
def _divf16(ins)
|
Divides 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 1, do nothing
* If 2nd operand is -1, do NEG32
| 4.877874 | 4.969093 | 0.981643 |
raise InvalidICError(str(quad),
"Invalid quad code params for '%s' (expected %i, but got %i)" %
(quad, QUADS[quad][0], nparams)
)
|
def throw_invalid_quad_params(quad, QUADS, nparams)
|
Raises an exception when an invalid number of params in the
quad code has been emitted.
| 7.879777 | 7.499469 | 1.050711 |
def bin32(f):
result = ''
a = int(f) & 0xFFFFFFFF # ensures int 32
for i in range(32):
result = str(a % 2) + result
a = a >> 1
return result
def bindec32(f):
result = '0'
a = f
if f >= 1:
result = bin32(f)
result += '.'
c = int(a)
for i in range(32):
a -= c
a *= 2
c = int(a)
result += str(c)
return result
e = 0 # exponent
s = 1 if x < 0 else 0 # sign
m = abs(x) # mantissa
while m >= 1:
m /= 2.0
e += 1
while 0 < m < 0.5:
m *= 2.0
e -= 1
M = bindec32(m)[3:]
M = str(s) + M
E = bin32(e + 128)[-8:] if x != 0 else bin32(0)[-8:]
return M, E
|
def fp(x)
|
Returns a floating point number as EXP+128, Mantissa
| 3.567224 | 3.462639 | 1.030204 |
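A quick way to see what the normalization loops in fp are doing: the mantissa is scaled into [0.5, 1) and the exponent counts the halvings/doublings, so x == m * 2**e for non-zero x (the sign is handled separately). A tiny standalone check (hypothetical helper name):

```python
def normalize(x):
    """Split abs(x) into mantissa m in [0.5, 1) and exponent e, as fp() does."""
    e, m = 0, abs(x)
    while m >= 1:
        m /= 2.0
        e += 1
    while 0 < m < 0.5:
        m *= 2.0
        e -= 1
    return m, e

m, e = normalize(3.25)
assert (m, e) == (0.8125, 2) and m * 2 ** e == 3.25
```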
def bin2hex(y):
return "%02X" % int(y, 2)
M, E = fp(x)
C = '0' + bin2hex(E) + 'h'
ED = '0' + bin2hex(M[8:16]) + bin2hex(M[:8]) + 'h'
LH = '0' + bin2hex(M[24:]) + bin2hex(M[16:24]) + 'h'
return C, ED, LH
|
def immediate_float(x)
|
Returns C, DE, HL values for loading
an immediate floating point.
| 4.099208 | 4.020427 | 1.019595 |
if stopOn is None:
for i in self.children:
i.inorder(funct)
else:
for i in self.children:
if i.inorder(funct) == stopOn:
return stopOn
return funct(self)
|
def inorder(self, funct, stopOn=None)
|
Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
| 2.518456 | 2.474535 | 1.017749 |
if funct(self.symbol) == stopOn and stopOn is not None:
return stopOn
if stopOn is None:
for i in self.children:
i.preorder(funct)
else:
for i in self.children:
if i.preorder(funct) == stopOn:
return stopOn
|
def preorder(self, funct, stopOn=None)
|
Iterates in preorder, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
| 2.911538 | 2.97795 | 0.977699 |
if stopOn is None:
for i in range(len(self.children) - 1, -1, -1):
self.children[i].postorder(funct)
else:
for i in range(len(self.children) - 1, -1, -1):
if self.children[i].postorder(funct) == stopOn:
return stopOn
return funct(self.symbol)
|
def postorder(self, funct, stopOn=None)
|
Iterates in postorder, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
| 1.936263 | 1.929191 | 1.003665 |
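The three traversal rows above share the same stopOn pattern: the walk short-circuits as soon as the callback returns the requested value. A self-contained sketch of that idea with a minimal node class (hypothetical, and passing stopOn down the recursion):

```python
class Node:
    def __init__(self, symbol, *children):
        self.symbol = symbol
        self.children = list(children)

    def preorder(self, funct, stopOn=None):
        result = funct(self.symbol)           # visit this node first
        if stopOn is not None and result == stopOn:
            return stopOn                     # short-circuit the whole walk
        for child in self.children:
            if child.preorder(funct, stopOn) == stopOn and stopOn is not None:
                return stopOn

tree = Node('a', Node('b'), Node('c', Node('d')))
visited = []
tree.preorder(lambda s: visited.append(s) or s == 'c', stopOn=True)
assert visited == ['a', 'b', 'c']   # 'd' is never reached
```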
result = clss(symbol)
for i in nexts:
if i is None:
continue
if not isinstance(i, clss):
raise NotAnAstError(i)
result.appendChild(i)
return result
|
def makenode(clss, symbol, *nexts)
|
Stores the symbol in an AST instance and appends
the given nodes as its children.
| 4.618403 | 4.226429 | 1.092744 |
global OUTPUT
global INCLUDED
global CURRENT_DIR
global ENABLED
global INCLUDEPATH
global IFDEFS
global ID_TABLE
global CURRENT_FILE
global_.FILENAME = '(stdin)'
OUTPUT = ''
INCLUDED = {}
CURRENT_DIR = ''
pwd = get_include_path()
INCLUDEPATH = [os.path.join(pwd, 'library'), os.path.join(pwd, 'library-asm')]
ENABLED = True
IFDEFS = []
global_.has_errors = 0
global_.error_msg_cache.clear()
parser.defaulted_states = {}
ID_TABLE = DefinesTable()
del CURRENT_FILE[:]
|
def init()
|
Initializes the preprocessor
| 9.944012 | 10.028498 | 0.991575 |
f1 = os.path.basename(sys.argv[0]).lower() # script filename
f2 = os.path.basename(sys.executable).lower() # Executable filename
# If executable filename and script name are the same, we are
if f1 == f2 or f2 == f1 + '.exe': # under a "compiled" python binary
result = os.path.dirname(os.path.realpath(sys.executable))
else:
result = os.path.dirname(os.path.realpath(__file__))
return result
|
def get_include_path()
|
Returns the default include path, using sys to detect
whether we run as a script or as a frozen executable.
| 3.997144 | 3.874913 | 1.031544 |
fname = api.utils.sanitize_filename(fname)
i_path = [CURRENT_DIR] + INCLUDEPATH if local_first else list(INCLUDEPATH)
i_path.extend(OPTIONS.include_path.value.split(':') if OPTIONS.include_path.value else [])
if os.path.isabs(fname):
if os.path.isfile(fname):
return fname
else:
for dir_ in i_path:
path = api.utils.sanitize_filename(os.path.join(dir_, fname))
if os.path.exists(path):
return path
error(lineno, "file '%s' not found" % fname)
return ''
|
def search_filename(fname, lineno, local_first)
|
Searches for a filename in the list of include paths.
If local_first is true, the current directory of the file
being analyzed is tried first.
| 3.692836 | 3.332031 | 1.108284 |
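Stripped of the preprocessor globals, the lookup above is a plain ordered-directories search. A hedged standalone sketch (hypothetical function name; the real version also consults OPTIONS.include_path and sanitizes names):

```python
import os

def find_include(fname, search_dirs):
    """Return the first existing path for fname, searching search_dirs in order."""
    if os.path.isabs(fname):
        return fname if os.path.isfile(fname) else ''
    for dir_ in search_dirs:
        candidate = os.path.join(dir_, fname)
        if os.path.exists(candidate):
            return candidate
    return ''   # the caller reports "file not found"

# Usage: local dir first, then the library include paths
# find_include('mymacros.bas', ['.', '/opt/zxb/library', '/opt/zxb/library-asm'])
```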
global CURRENT_DIR
filename = search_filename(filename, lineno, local_first)
if filename not in INCLUDED.keys():
INCLUDED[filename] = []
if len(CURRENT_FILE) > 0: # Added from which file, line
INCLUDED[filename].append((CURRENT_FILE[-1], lineno))
CURRENT_FILE.append(filename)
CURRENT_DIR = os.path.dirname(filename)
return LEXER.include(filename)
|
def include_file(filename, lineno, local_first)
|
Performs a file inclusion (#include) in the preprocessor.
Writes down that "filename" was included at the current file,
at line <lineno>.
If local_first is True, then it will first search the file in the
local path before looking for it in the include path chain.
This is used when doing a #include "filename".
| 4.592426 | 5.149939 | 0.891744 |
filename = search_filename(filename, lineno, local_first)
if filename not in INCLUDED.keys(): # If not already included
return include_file(filename, lineno, local_first) # include it and return
# Now checks if the file has been included more than once
if len(INCLUDED[filename]) > 1:
warning(lineno, "file '%s' already included more than once, in file "
"'%s' at line %i" %
(filename, INCLUDED[filename][0][0], INCLUDED[filename][0][1]))
# Empty file (already included)
LEXER.next_token = '_ENDFILE_'
return ''
|
def include_once(filename, lineno, local_first)
|
Performs a file inclusion (#include) in the preprocessor.
Writes down that "filename" was included at the current file,
at line <lineno>.
The file is ignored if it was previously included (a warning will
be emitted though).
If local_first is True, then it will first search the file in the
local path before looking for it in the include path chain.
This is used when doing a #include "filename".
| 5.449557 | 5.860834 | 0.929826 |
try:
tmp = [str(x()) if isinstance(x, MacroCall) else x for x in p[1]]
except PreprocError as v:
error(v.lineno, v.message)
tmp.append(p[2])
p[0] = tmp
|
def p_program_tokenstring(p)
|
program : defs NEWLINE
| 6.532991 | 6.370153 | 1.025563 |
global CURRENT_DIR
p[0] = [p[1] + p[2]] + p[3] + [p[4]]
CURRENT_FILE.pop() # Remove top of the stack
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1])
|
def p_include_file(p)
|
include_file : include NEWLINE program _ENDFILE_
| 4.60081 | 4.148034 | 1.109154 |
global CURRENT_DIR
p[0] = [p[1] + p[2]] + p[3] + [p[4]]
CURRENT_FILE.pop() # Remove top of the stack
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1])
|
def p_include_once_ok(p)
|
include_file : include_once NEWLINE program _ENDFILE_
| 4.646745 | 4.03983 | 1.150233 |
if ENABLED:
p[0] = include_file(p[2], p.lineno(2), local_first=True)
else:
p[0] = []
p.lexer.next_token = '_ENDFILE_'
|
def p_include(p)
|
include : INCLUDE STRING
| 9.08909 | 8.682608 | 1.046816 |
if ENABLED:
p[0] = include_file(p[2], p.lineno(2), local_first=False)
else:
p[0] = []
p.lexer.next_token = '_ENDFILE_'
|
def p_include_fname(p)
|
include : INCLUDE FILENAME
| 9.414836 | 8.857437 | 1.06293 |
if ENABLED:
p[0] = include_once(p[3], p.lineno(3), local_first=True)
else:
p[0] = []
if not p[0]:
p.lexer.next_token = '_ENDFILE_'
|
def p_include_once(p)
|
include_once : INCLUDE ONCE STRING
| 7.961067 | 7.92886 | 1.004062 |
p[0] = []
if ENABLED:
p[0] = include_once(p[3], p.lineno(3), local_first=False)
else:
p[0] = []
if not p[0]:
p.lexer.next_token = '_ENDFILE_'
|
def p_include_once_fname(p)
|
include_once : INCLUDE ONCE FILENAME
| 7.455809 | 6.768832 | 1.101491 |
if ENABLED:
if p[4]:
if SPACES.match(p[4][0]):
p[4][0] = p[4][0][1:]
else:
warning(p.lineno(1), "missing whitespace after the macro name")
ID_TABLE.define(p[2], args=p[3], value=p[4], lineno=p.lineno(2),
fname=CURRENT_FILE[-1])
p[0] = []
|
def p_define(p)
|
define : DEFINE ID params defs
| 5.95494 | 5.571095 | 1.068899 |
# Defines the 'epsilon' parameter
p[0] = [ID('', value='', args=None, lineno=p.lineno(1),
fname=CURRENT_FILE[-1])]
|
def p_define_params_empty(p)
|
params : LP RP
| 25.30998 | 26.433193 | 0.957507 |
for i in p[2]:
if not isinstance(i, ID):
error(p.lineno(3),
'"%s" might not appear in a macro parameter list' % str(i))
p[0] = None
return
names = [x.name for x in p[2]]
for i in range(len(names)):
if names[i] in names[i + 1:]:
error(p.lineno(3),
'Duplicated name parameter "%s"' % (names[i]))
p[0] = None
return
p[0] = p[2]
|
def p_define_params_paramlist(p)
|
params : LP paramlist RP
| 3.130396 | 3.141921 | 0.996332 |
p[0] = [ID(p[1], value='', args=None, lineno=p.lineno(1),
fname=CURRENT_FILE[-1])]
|
def p_paramlist_single(p)
|
paramlist : ID
| 14.520524 | 12.274528 | 1.18298 |
p[0] = p[1] + [ID(p[3], value='', args=None, lineno=p.lineno(1),
fname=CURRENT_FILE[-1])]
|
def p_paramlist_paramlist(p)
|
paramlist : paramlist COMMA ID
| 13.483508 | 10.254329 | 1.314909 |
global ENABLED
if ENABLED:
p[0] = [p[2]] + p[3]
p[0] += ['#line %i "%s"' % (p.lineno(4) + 1, CURRENT_FILE[-1])]
else:
p[0] = ['#line %i "%s"' % (p.lineno(4) + 1, CURRENT_FILE[-1])]
ENABLED = IFDEFS[-1][0]
IFDEFS.pop()
|
def p_ifdef(p)
|
ifdef : if_header NEWLINE program ENDIF
| 4.042553 | 3.815121 | 1.059613 |
global ENABLED
p[0] = p[1] + p[2]
p[0] += ['#line %i "%s"' % (p.lineno(3) + 1, CURRENT_FILE[-1])]
ENABLED = IFDEFS[-1][0]
IFDEFS.pop()
|
def p_ifdef_else(p)
|
ifdef : ifdefelsea ifdefelseb ENDIF
| 7.774165 | 7.146805 | 1.087782 |
global ENABLED
if ENABLED:
p[0] = [p[2]] + p[3]
else:
p[0] = []
ENABLED = not ENABLED
|
def p_ifdef_else_a(p)
|
ifdefelsea : if_header NEWLINE program
| 4.143275 | 4.014042 | 1.032195 |
global ENABLED
if ENABLED:
p[0] = ['#line %i "%s"%s' % (p.lineno(1) + 1, CURRENT_FILE[-1], p[2])]
p[0] += p[3]
else:
p[0] = []
|
def p_ifdef_else_b(p)
|
ifdefelseb : ELSE NEWLINE program
| 5.336611 | 5.109688 | 1.04441 |
global ENABLED
IFDEFS.append((ENABLED, p.lineno(2)))
ENABLED = ID_TABLE.defined(p[2])
|
def p_if_header(p)
|
if_header : IFDEF ID
| 24.548096 | 15.838132 | 1.549936 |
global ENABLED
IFDEFS.append((ENABLED, p.lineno(2)))
ENABLED = not ID_TABLE.defined(p[2])
|
def p_ifn_header(p)
|
if_header : IFNDEF ID
| 26.175632 | 19.087532 | 1.371347 |
global ENABLED
IFDEFS.append((ENABLED, p.lineno(1)))
ENABLED = bool(int(p[2])) if p[2].isdigit() else ID_TABLE.defined(p[2])
|
def p_if_expr_header(p)
|
if_header : IF expr
| 13.017517 | 11.669338 | 1.115532 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a < b else '0'
|
def p_exprlt(p)
|
expr : expr LT expr
| 2.815493 | 2.442285 | 1.152811 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a <= b else '0'
|
def p_exprle(p)
|
expr : expr LE expr
| 3.031064 | 2.478031 | 1.223174 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a > b else '0'
|
def p_exprgt(p)
|
expr : expr GT expr
| 2.74121 | 2.410606 | 1.137146 |
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a >= b else '0'
|
def p_exprge(p)
|
expr : expr GE expr
| 2.909868 | 2.499303 | 1.164272 |
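All four comparison rules above follow the same convention: operands that are not decimal digits evaluate as 0, and each rule yields the string '1' or '0'. A compact sketch of that evaluation outside the PLY grammar (hypothetical helper):

```python
def eval_cmp(a, op, b):
    x = int(a) if a.isdigit() else 0   # non-numeric tokens count as 0
    y = int(b) if b.isdigit() else 0
    result = {'<': x < y, '<=': x <= y, '>': x > y, '>=': x >= y}[op]
    return '1' if result else '0'

assert eval_cmp('2', '<', '10') == '1'
assert eval_cmp('FOO', '>=', '0') == '1'   # FOO is treated as 0
```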
global CURRENT_DIR
prev_dir = CURRENT_DIR
CURRENT_FILE.append(filename)
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1])
LEXER.input(input_, filename)
LEXER.lex.begin(state)
parser.parse(lexer=LEXER, debug=OPTIONS.Debug.value > 2)
CURRENT_FILE.pop()
CURRENT_DIR = prev_dir
|
def filter_(input_, filename='<internal>', state='INITIAL')
|
Filters the input string through the preprocessor.
The result is appended to the global OUTPUT string.
| 4.953292 | 5.074691 | 0.976078 |
if name is None:
tplot_names = list(data_quants.keys())
for i in tplot_names:
del data_quants[i]
return
if not isinstance(name, list):
name = [name]
entries = []
###
for i in name:
if ('?' in i) or ('*' in i):
for j in data_quants.keys():
var_verif = fnmatch.fnmatch(data_quants[j].name, i)
if var_verif == 1:
entries.append(data_quants[j].name)
else:
continue
for key in entries:
if key in data_quants:
del data_quants[key]
###
elif i not in data_quants.keys():
print(str(i) + " is currently not in pytplot.")
return
else:
temp_data_quants = data_quants[i]
str_name = temp_data_quants.name
del data_quants[str_name]
return
|
def del_data(name=None)
|
This function will delete tplot variables that are already stored in memory.
Parameters:
name : str
Name of the tplot variable to be deleted. If no name is provided, then
all tplot variables will be deleted.
Returns:
None
Examples:
>>> # Delete Variable 1
>>> import pytplot
>>> pytplot.del_data("Variable1")
| 3.625292 | 3.446289 | 1.051941 |
if not isinstance(min, (int, float, complex)):
min = tplot_utilities.str_to_int(min)
if not isinstance(max, (int, float, complex)):
max = tplot_utilities.str_to_int(max)
if 'x_range' in tplot_opt_glob:
lim_info['xlast'] = tplot_opt_glob['x_range']
else:
lim_info['xfull'] = Range1d(min, max)
lim_info['xlast'] = Range1d(min, max)
tplot_opt_glob['x_range'] = [min, max]
return
|
def xlim(min, max)
|
This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
| 2.890041 | 3.104169 | 0.931019 |
if name not in data_quants.keys():
print("That name is currently not in pytplot.")
return
temp_data_quant = data_quants[name]
temp_data_quant.zaxis_opt['z_range'] = [min, max]
return
|
def zlim(name, min, max)
|
This function will set the z axis range displayed for a specific tplot variable.
This is only used for spec plots, where the z axis represents the magnitude of the values
in each bin.
Parameters:
name : str
The name of the tplot variable that you wish to set z limits for.
min : flt
The start of the z axis.
max : flt
The end of the z axis.
Returns:
None
Examples:
>>> # Change the z range of Variable1
>>> import pytplot
>>> x_data = [1,2,3]
>>> y_data = [ [1,2,3] , [4,5,6], [7,8,9] ]
>>> v_data = [1,2,3]
>>> pytplot.store_data("Variable3", data={'x':x_data, 'y':y_data, 'v':v_data})
>>> pytplot.zlim('Variable1', 2, 3)
| 7.276906 | 6.287845 | 1.157297 |
if name not in data_quants.keys():
print("That name is currently not in pytplot.")
return
temp_data_quant = data_quants[name]
temp_data_quant.yaxis_opt['y_range'] = [min, max]
return
|
def ylim(name, min, max)
|
This function will set the y axis range displayed for a specific tplot variable.
Parameters:
name : str
The name of the tplot variable that you wish to set y limits for.
min : flt
The start of the y axis.
max : flt
The end of the y axis.
Returns:
None
Examples:
>>> # Change the y range of Variable1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.ylim('Variable1', 2, 4)
| 7.010754 | 6.313552 | 1.110429 |
if 'title_size' in pytplot.tplot_opt_glob:
size = pytplot.tplot_opt_glob['title_size']
if 'title_text' in pytplot.tplot_opt_glob:
if pytplot.tplot_opt_glob['title_text'] != '':
layout.addItem(LabelItem(pytplot.tplot_opt_glob['title_text'], size=size, color='k'), row=0, col=0)
return True
return False
|
def _set_pyqtgraph_title(layout)
|
Private function to add a title to the first row of the window.
Returns True if a Title is set. Else, returns False.
| 2.50631 | 2.295563 | 1.091806 |
global data_quants
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
temp_data_quant = data_quants[name]
data_val = temp_data_quant.data.values
time_val = temp_data_quant.data.index
return(time_val, data_val)
|
def get_data(name)
|
This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time, data = pytplot.get_data("Variable1")
| 4.416694 | 3.774332 | 1.170192 |
#check if old name is in current dictionary
if old_name not in pytplot.data_quants.keys():
print("That name is currently not in pytplot")
return
#if old name input is a number, convert to corresponding name
if isinstance(old_name, int):
old_name = pytplot.data_quants[old_name].name
#remake dictionary with new name in old name's slot
d = pytplot.data_quants
d2 = OrderedDict([(new_name, v) if k == old_name else (k, v) for k, v in d.items()])
data_quants = d2
for key in d2:
data_quants[key].name = key
pytplot.data_quants = data_quants
return
|
def tplot_rename(old_name, new_name)
|
This function will rename tplot variables that are already stored in memory.
Parameters:
old_name : str
Old name of the Tplot Variable
new_name : str
New name of the Tplot Variable
Returns:
None
Examples:
>>> # Rename Variable 1 to Variable 2
>>> import pytplot
>>> pytplot.tplot_rename("Variable1", "Variable2")
| 3.284808 | 3.257736 | 1.00831 |
if self.spec_bins is None:
return
if len(self.spec_bins) == len(self.data.index):
self.spec_bins_time_varying = True
break_top_loop = False
for index, row in self.spec_bins.iterrows():
if row.isnull().values.all():
continue
else:
for i in row.index:
if np.isfinite(row[i]) and np.isfinite(row[i + 1]):
ascending = row[i] < row[i + 1]
break_top_loop = True
break
else:
continue
if break_top_loop:
break
else:
ascending = self.spec_bins[0].iloc[0] < self.spec_bins[1].iloc[0]
return ascending
|
def _check_spec_bins_ordering(self)
|
This is a private function of the TVar object; it is run during
object creation to check whether spec_bins are ascending or descending.
| 3.070861 | 2.823816 | 1.087486 |
index = 0
return_names=[]
for key, _ in data_quants.items():
if isinstance(data_quants[key].data, list):
if isinstance(key, str):
names_to_print = data_quants[key].name + " data from: "
for name in data_quants[key].data:
names_to_print = names_to_print + " " + name
print(index, ":", names_to_print)
index+=1
else:
if isinstance(key, str):
names_to_print = data_quants[key].name
print(index, ":", names_to_print)
index+=1
return_names.append(names_to_print)
return return_names
|
def tplot_names()
|
This function will print out and return a list of all current Tplot Variables stored in the memory.
Parameters:
None
Returns:
list : list of str
A list of all Tplot Variables stored in the memory
Examples:
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> tnames = pytplot.tplot_names()
0 : Variable 1
| 2.850114 | 3.049534 | 0.934607 |
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
print("Start Time: " + tplot_utilities.int_to_str(data_quants[name].trange[0]))
print("End Time: " + tplot_utilities.int_to_str(data_quants[name].trange[1]))
return(data_quants[name].trange[0], data_quants[name].trange[1])
|
def get_timespan(name)
|
This function extracts the time span from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_begin : float
The beginning of the time series
time_end : float
The end of the time series
Examples:
>>> # Retrieve the time span from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time1, time2 = pytplot.get_timespan("Variable1")
| 3.567539 | 3.461306 | 1.030692 |
if keyword == 'days':
dt *= 86400
elif keyword == 'hours':
dt *= 3600
elif keyword == 'minutes':
dt *= 60
elif keyword == 'seconds':
dt *= 1
else:
print("Invalid 'keyword' option.\nEnum(None, 'hours', 'minutes', 'seconds', 'days')")
if not isinstance(t1, (int, float, complex)):
t1 = tplot_utilities.str_to_int(t1)
t2 = t1+dt
xlim(t1, t2)
return
|
def timespan(t1, dt, keyword = 'days')
|
This function will set the time range for all time series plots. This is a wrapper for the function "xlim" to
better handle time axes.
Parameters:
t1 : flt/str
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
dt : flt
The time duration of the plots. Default is number of days.
keyword : str
Sets the units of the "dt" variable. Days, hours, minutes, and seconds are all accepted.
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.timespan(1500249600, 1)
>>> # The same as above, but using different inputs
>>> pytplot.timespan("2017-07-17 00:00:00", 24, keyword='hours')
| 3.468701 | 3.342823 | 1.037656 |
option = option.lower()
temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)
pytplot.tplot_opt_glob = temp
return
|
def tplot_options(option, value)
|
This function allows the user to set several global options for the generated plots.
Parameters:
option : str
The name of the option. See section below
value : str/int/float/list
The value of the option. See section below.
Options:
============ ========== =====
Options Value type Notes
============ ========== =====
title str Title of the entire output
title_size int Font size of the output
wsize [int, int] [height, width], pixel size of the plot window
title_align int Offset position in pixels of the title
var_label str Name of the tplot variable to be used as another x axis
alt_range [flt, flt] The min and max altitude to be plotted on all alt plots
map_x_range [int, int] The min and max longitude to be plotted on all map plots
map_y_range [int, int] The min and max latitude to be plotted on all map plots
x_range [flt, flt] The min and max x_range (usually time) to be plotted on all Spec/1D plots
data_gap int Number of seconds with consecutive nan values allowed before no interp should occur
crosshair bool Option allowing crosshairs and crosshair legend
roi [str, str] Times between which there's a region of interest for a user
============ ========== =====
Returns:
None
Examples:
>>> # Set the plot title
>>> import pytplot
>>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')
>>> # Set the window size
>>> pytplot.tplot_options('wsize', [1000,500])
| 5.601828 | 9.34497 | 0.599448 |
if isinstance(names,int):
names = list(data_quants.keys())[names-1]
if not isinstance(names, list):
names = [names]
#Check that we have all available data
for name in names:
if isinstance(data_quants[name].data, list):
for data_name in data_quants[name].data:
if data_name not in names:
names.append(data_name)
#Pickle it up
to_pickle =[]
for name in names:
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
to_pickle.append(data_quants[name])
num_quants = len(to_pickle)
to_pickle = [num_quants] + to_pickle
temp_tplot_opt_glob = tplot_opt_glob
to_pickle.append(temp_tplot_opt_glob)
if filename==None:
filename='var_'+'-'.join(names)+'.pytplot'
pickle.dump(to_pickle, open(filename, "wb"))
return
|
def tplot_save(names, filename=None)
|
This function will save tplot variables into a single file by using the python "pickle" function.
This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session,
but save all of your data/options. All variables and plot options can be read back into tplot with the
"tplot_restore" command.
Parameters:
names : str/list
A string or a list of strings of the tplot variables you would like saved.
filename : str, optional
The filename where you want to save the file.
Returns:
None
Examples:
>>> # Save a single tplot variable
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.ylim('Variable1', 2, 4)
>>> pytplot.tplot_save('Variable1', filename='C:/temp/variable1.pytplot')
| 3.489798 | 3.655058 | 0.954786 |
if isinstance(name, int):
name = list(data_quants.keys())[name-1]
if not isinstance(name, list):
name = [name]
name_num = len(name)
ymin = None
ymax = None
for i in range(name_num):
if name[i] not in data_quants.keys():
print(str(name[i]) + " is currently not in pytplot.")
return
temp_data_quant = data_quants[name[i]]
yother = temp_data_quant.data
if trg is not None:
for column_name in yother.columns:
y = yother[column_name]
trunc_tempt_data_quant = y.truncate(before=trg[0], after=trg[1])
loc_min = trunc_tempt_data_quant.min(skipna=True)
loc_max = trunc_tempt_data_quant.max(skipna=True)
if (ymin is None) or (loc_min < ymin):
ymin = loc_min
if (ymax is None) or (loc_max > ymax):
ymax = loc_max
else:
for column_name in yother.columns:
y = yother[column_name]
loc_min = y.min(skipna=True)
loc_max = y.max(skipna=False)
if (ymin is None) or (loc_min < ymin):
ymin = loc_min
if (ymax is None) or (loc_max > ymax):
ymax = loc_max
print("Y Minimum: " + str(ymin))
print("Y Maximum: " + str(ymax))
return ymin, ymax
|
def get_ylimits(name, trg=None)
|
This function will extract the y-limits from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
trg : list, optional
The time range that you would like to look in
Returns:
ymin : float
The minimum value of y
ymax : float
The maximum value of y
Examples:
>>> # Retrieve the y-limits from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> y1, y2 = pytplot.get_ylimits("Variable1")
| 2.10468 | 2.085504 | 1.009195 |
if val == 'on':
todaystring = datetime.datetime.now().strftime('%Y-%m-%d %H%M%S')
extra_layouts['time_stamp'] = todaystring
else:
if 'time_stamp' in extra_layouts:
del extra_layouts['time_stamp']
return
|
def timestamp(val)
|
This function will turn on a time stamp that shows up at the bottom of every generated plot.
Parameters:
val : str
A string that can either be 'on' or 'off'.
Returns:
None
Examples:
>>> # Turn on the timestamp
>>> import pytplot
>>> pytplot.timestamp('on')
| 4.836646 | 7.171728 | 0.674405 |
'''
I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly.
'''
if self.picturenotgened:
self.generatePicture(self.getBoundingParents()[0].rect())
self.picturenotgened = False
pg.ImageItem.paint(self, p, *args)
self.generatePicture(self.getBoundingParents()[0].rect())
|
def paint(self, p, *args)
|
I have no idea why, but we need to generate the picture after painting otherwise
it draws incorrectly.
| 8.278251 | 3.933718 | 2.104434 |
profile = debug.Profiler()
gotNewData = False
if image is None:
if self.image is None:
return
else:
gotNewData = True
shapeChanged = (self.image is None or image.shape != self.image.shape)
image = image.view(np.ndarray)
if self.image is None or image.dtype != self.image.dtype:
self._effectiveLut = None
self.image = image
if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:
if 'autoDownsample' not in kargs:
kargs['autoDownsample'] = True
if shapeChanged:
self.prepareGeometryChange()
self.informViewBoundsChanged()
profile()
if autoLevels is None:
if 'levels' in kargs:
autoLevels = False
else:
autoLevels = True
if autoLevels:
img = self.image
while img.size > 2**16:
img = img[::2, ::2]
mn, mx = img.min(), img.max()
if mn == mx:
mn = 0
mx = 255
kargs['levels'] = [mn,mx]
profile()
self.setOpts(update=False, **kargs)
profile()
self.qimage = None
self.update()
profile()
if gotNewData:
self.sigImageChanged.emit()
|
def setImage(self, image=None, autoLevels=None, **kargs)
|
Same thing as ImageItem.setImage, but we don't update the drawing
| 2.92837 | 2.78448 | 1.051676 |
if root is None:
root = self.item
preItems = []
postItems = []
if isinstance(root, QtGui.QGraphicsScene):
childs = [i for i in root.items() if i.parentItem() is None]
rootItem = []
else:
# CHANGE: For GraphicsLayouts, there is no function for childItems(), so I just
# replaced it with .items()
try:
childs = root.childItems()
except:
childs = root.items()
rootItem = [root]
childs.sort(key=lambda a: a.zValue())
while len(childs) > 0:
ch = childs.pop(0)
tree = self.getPaintItems(ch)
if int(ch.flags() & ch.ItemStacksBehindParent) > 0 or (
ch.zValue() < 0 and int(ch.flags() & ch.ItemNegativeZStacksBehindParent) > 0):
preItems.extend(tree)
else:
postItems.extend(tree)
return preItems + rootItem + postItems
|
def getPaintItems(self, root=None)
|
Return a list of all items that should be painted in the correct order.
| 3.669547 | 3.546712 | 1.034633 |
'''
A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns
'''
# df here is actually a dictionary of several dataframes, 'mat', 'mat_orig',
# etc
if df is None:
df = net.dat_to_df()
if norm_type == 'zscore':
df = zscore_df(df, axis, keep_orig)
if norm_type == 'qn':
df = qn_df(df, axis, keep_orig)
net.df_to_dat(df)
|
def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False)
|
A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns
| 5.883853 | 2.744676 | 2.143733 |
'''
do quantile normalization of a dataframe dictionary, does not write to net
'''
df_qn = {}
for mat_type in df:
inst_df = df[mat_type]
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
missing_values = inst_df.isnull().values.any()
# make mask of missing values
if missing_values:
# get nan mask
missing_mask = pd.isnull(inst_df)
# tmp fill in na with zero, will not affect qn
inst_df = inst_df.fillna(value=0)
# calc common distribution
common_dist = calc_common_dist(inst_df)
# swap in common distribution
inst_df = swap_in_common_dist(inst_df, common_dist)
# swap back in missing values
if missing_values:
inst_df = inst_df.mask(missing_mask, other=np.nan)
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
df_qn[mat_type] = inst_df
return df_qn
|
def qn_df(df, axis='row', keep_orig=False)
|
do quantile normalization of a dataframe dictionary, does not write to net
| 3.938115 | 3.311168 | 1.189343 |
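The quantile normalization above works per matrix in the dictionary: build a common distribution from the column-wise sorted values (see calc_common_dist below), then swap each column's values for the common ones at matching ranks. A minimal single-DataFrame sketch of that idea, assuming no missing values (the real code also masks NaNs and can transpose for row-wise qn):

```python
import numpy as np
import pandas as pd

def quantile_normalize(df):
    # common distribution: mean of each rank position across columns
    common = np.sort(df.values, axis=0).mean(axis=1)
    out = df.copy()
    for col in df.columns:
        ranks = df[col].rank(method='first').astype(int) - 1
        out[col] = common[ranks.values]
    return out

df = pd.DataFrame({'a': [5.0, 2.0, 3.0], 'b': [4.0, 1.0, 4.0]})
print(quantile_normalize(df))   # both columns now share the same value distribution
```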
'''
calculate a common distribution (for col qn only) that will be used to qn
'''
# axis is col
tmp_arr = np.array([])
col_names = df.columns.tolist()
for inst_col in col_names:
# sort column
tmp_vect = df[inst_col].sort_values(ascending=False).values
# stacking rows vertically (will transpose)
if tmp_arr.shape[0] == 0:
tmp_arr = tmp_vect
else:
tmp_arr = np.vstack((tmp_arr, tmp_vect))
tmp_arr = tmp_arr.transpose()
common_dist = tmp_arr.mean(axis=1)
return common_dist
|
def calc_common_dist(df)
|
calculate a common distribution (for col qn only) that will be used to qn
| 4.859018 | 3.429543 | 1.416812 |
'''
take the zscore of a dataframe dictionary, does not write to net (self)
'''
df_z = {}
for mat_type in df:
if keep_orig and mat_type == 'mat':
mat_orig = deepcopy(df[mat_type])
inst_df = df[mat_type]
if axis == 'row':
inst_df = inst_df.transpose()
df_z[mat_type] = (inst_df - inst_df.mean())/inst_df.std()
if axis == 'row':
df_z[mat_type] = df_z[mat_type].transpose()
if keep_orig:
df_z['mat_orig'] = mat_orig
return df_z
|
def zscore_df(df, axis='row', keep_orig=False)
|
take the zscore of a dataframe dictionary, does not write to net (self)
| 3.237395 | 2.253799 | 1.436417 |
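For reference, the transpose trick above is equivalent to normalizing along the chosen axis directly. A short pandas sketch of both variants (column-wise corresponds to axis='col', row-wise to the default axis='row'):

```python
import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [2.0, 4.0, 6.0]})

col_z = (df - df.mean()) / df.std()                                   # z-score each column
row_z = df.sub(df.mean(axis=1), axis=0).div(df.std(axis=1), axis=0)   # z-score each row
```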
'''
calculate pvalue of category closeness
'''
# calculate the distance between the data points within the same category and
# compare to null distribution
for inst_rc in ['row', 'col']:
inst_nodes = deepcopy(net.dat['nodes'][inst_rc])
inst_index = deepcopy(net.dat['node_info'][inst_rc]['clust'])
# reorder based on clustered order
inst_nodes = [ inst_nodes[i] for i in inst_index]
# make distance matrix dataframe
dm = dist_matrix_lattice(inst_nodes)
node_infos = list(net.dat['node_info'][inst_rc].keys())
all_cats = []
for inst_info in node_infos:
if 'dict_cat_' in inst_info:
all_cats.append(inst_info)
for cat_dict in all_cats:
tmp_dict = net.dat['node_info'][inst_rc][cat_dict]
pval_name = cat_dict.replace('dict_','pval_')
net.dat['node_info'][inst_rc][pval_name] = {}
for cat_name in tmp_dict:
subset = tmp_dict[cat_name]
inst_median = calc_median_dist_subset(dm, subset)
hist = calc_hist_distances(dm, subset, inst_nodes)
pval = 0
for i in range(len(hist['prob'])):
if i == 0:
pval = hist['prob'][i]
if i >= 1:
if inst_median >= hist['bins'][i]:
pval = pval + hist['prob'][i]
net.dat['node_info'][inst_rc][pval_name][cat_name] = pval
|
def main(net)
|
calculate pvalue of category closeness
| 4.127836 | 3.850813 | 1.071939 |
'''
1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories
'''
import numpy as np
from ast import literal_eval as make_tuple
test = {}
test['row'] = df['mat'].index.tolist()
test['col'] = df['mat'].columns.tolist()
# if type( test_row ) is not str and type( test_row ) is not tuple:
found_tuple = {}
found_number = {}
for inst_rc in ['row','col']:
inst_name = test[inst_rc][0]
found_tuple[inst_rc] = False
found_number[inst_rc] = False
if type(inst_name) != tuple:
if type(inst_name) is int or type(inst_name) is float or type(inst_name) is np.int64:
found_number[inst_rc] = True
else:
check_open = inst_name[0]
check_comma = inst_name.find(',')
check_close = inst_name[-1]
if check_open == '(' and check_close == ')' and check_comma > 0 \
and check_comma < len(inst_name):
found_tuple[inst_rc] = True
# convert to tuple if necessary
#################################################
if found_tuple['row']:
row_names = df['mat'].index.tolist()
row_names = [make_tuple(x) for x in row_names]
df['mat'].index = row_names
if found_tuple['col']:
col_names = df['mat'].columns.tolist()
col_names = [make_tuple(x) for x in col_names]
df['mat'].columns = col_names
# convert numbers to string if necessary
#################################################
if found_number['row']:
row_names = df['mat'].index.tolist()
row_names = [str(x) for x in row_names]
df['mat'].index = row_names
if found_number['col']:
col_names = df['mat'].columns.tolist()
col_names = [str(x) for x in col_names]
df['mat'].columns = col_names
return df
|
def main(df)
|
1) check that rows are strings (in case of numerical names)
2) check for tuples, and in that case load tuples to categories
| 2.55089 | 2.097193 | 1.216335 |
'''
This is always run when data is loaded.
'''
from . import categories
# check if df has unique values
df['mat'] = make_unique_labels.main(net, df['mat'])
net.dat['mat'] = df['mat'].values
net.dat['nodes']['row'] = df['mat'].index.tolist()
net.dat['nodes']['col'] = df['mat'].columns.tolist()
for inst_rc in ['row', 'col']:
if type(net.dat['nodes'][inst_rc][0]) is tuple:
# get the number of categories from the length of the tuple
# subtract 1 because the name is the first element of the tuple
num_cat = len(net.dat['nodes'][inst_rc][0]) - 1
net.dat['node_info'][inst_rc]['full_names'] = net.dat['nodes']\
[inst_rc]
for inst_rcat in range(num_cat):
net.dat['node_info'][inst_rc]['cat-' + str(inst_rcat)] = \
[i[inst_rcat + 1] for i in net.dat['nodes'][inst_rc]]
net.dat['nodes'][inst_rc] = [i[0] for i in net.dat['nodes'][inst_rc]]
if 'mat_up' in df:
net.dat['mat_up'] = df['mat_up'].values
net.dat['mat_dn'] = df['mat_dn'].values
if 'mat_orig' in df:
net.dat['mat_orig'] = df['mat_orig'].values
categories.dict_cat(net, define_cat_colors=define_cat_colors)
|
def df_to_dat(net, df, define_cat_colors=False)
|
This is always run when data is loaded.
| 3.234675 | 3.032104 | 1.066809 |
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray(self.dat['mat'])
|
def mat_to_numpy_arr(self)
|
convert list to numpy array - numpy arrays can not be saved as json
| 10.79878 | 4.99942 | 2.160007 |
''' cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument '''
import scipy
from copy import deepcopy
from scipy.spatial.distance import pdist
from . import categories, make_viz, cat_pval
dm = {}
for inst_rc in ['row', 'col']:
tmp_mat = deepcopy(net.dat['mat'])
dm[inst_rc] = calc_distance_matrix(tmp_mat, inst_rc, dist_type)
# save directly to dat structure
node_info = net.dat['node_info'][inst_rc]
node_info['ini'] = list(range( len(net.dat['nodes'][inst_rc]), -1, -1))
# cluster
if run_clustering is True:
node_info['clust'], node_info['group'] = \
clust_and_group(net, dm[inst_rc], linkage_type=linkage_type)
else:
dendro = False
node_info['clust'] = node_info['ini']
# sorting
if run_rank is True:
node_info['rank'] = sort_rank_nodes(net, inst_rc, 'sum')
node_info['rankvar'] = sort_rank_nodes(net, inst_rc, 'var')
else:
node_info['rank'] = node_info['ini']
node_info['rankvar'] = node_info['ini']
##################################
if ignore_cat is False:
categories.calc_cat_clust_order(net, inst_rc)
if calc_cat_pval is True:
cat_pval.main(net)
# make the visualization json
make_viz.viz_json(net, dendro, links)
return dm
|
def cluster_row_and_col(net, dist_type='cosine', linkage_type='average',
dendro=True, run_clustering=True, run_rank=True,
ignore_cat=False, calc_cat_pval=False, links=False)
|
cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument
| 4.439873 | 3.449449 | 1.287125 |
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if found_end is False:
num_rc = num_rc + 1
else:
found_end = True
max_rcat = 15
if max_rcat > len(lines):
max_rcat = len(lines) - 1
num_cc = 0
for i in range(max_rcat):
ccat_line = lines[i + 1].split('\t')
# make sure that line has length greater than one to prevent false cats from
# trailing new lines at end of matrix
if ccat_line[0] == '' and len(ccat_line) > 1:
num_cc = num_cc + 1
num_labels = {}
num_labels['row'] = num_rc + 1
num_labels['col'] = num_cc + 1
return num_labels
|
def check_categories(lines)
|
find out how many row and col categories are available
| 4.194168 | 3.729551 | 1.124577 |
'''
make a dictionary of node-category associations
'''
# print('---------------------------------')
# print('---- dict_cat: before setting cat colors')
# print('---------------------------------\n')
# print(define_cat_colors)
# print(net.viz['cat_colors'])
net.persistent_cat = True
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for inst_name_cat in all_cats:
dict_cat = {}
tmp_cats = net.dat['node_info'][inst_rc][inst_name_cat]
tmp_nodes = net.dat['nodes'][inst_rc]
for i in range(len(tmp_cats)):
inst_cat = tmp_cats[i]
inst_node = tmp_nodes[i]
if inst_cat not in dict_cat:
dict_cat[inst_cat] = []
dict_cat[inst_cat].append(inst_node)
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
net.dat['node_info'][inst_rc][tmp_name] = dict_cat
# merge with old cat_colors by default
cat_colors = net.viz['cat_colors']
if define_cat_colors == True:
cat_number = 0
for inst_rc in ['row', 'col']:
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
for cat_index in all_cats:
if cat_index not in cat_colors[inst_rc]:
cat_colors[inst_rc][cat_index] = {}
cat_names = sorted(list(set(net.dat['node_info'][inst_rc][cat_index])))
# loop through each category name and assign a color
for tmp_name in cat_names:
# using the same rules as the front-end to define cat_colors
inst_color = get_cat_color(cat_number + cat_names.index(tmp_name))
check_name = tmp_name
# check if category is string type and non-numeric
try:
float(check_name)
is_string_cat = False
except:
is_string_cat = True
if is_string_cat == True:
# check for default non-color
if ': ' in check_name:
check_name = check_name.split(': ')[1]
# if check_name == 'False' or check_name == 'false':
if 'False' in check_name or 'false' in check_name:
inst_color = '#eee'
if 'Not ' in check_name:
inst_color = '#eee'
# print('cat_colors')
# print('----------')
# print(cat_colors[inst_rc][cat_index])
# do not overwrite old colors
if tmp_name not in cat_colors[inst_rc][cat_index] and is_string_cat:
cat_colors[inst_rc][cat_index][tmp_name] = inst_color
# print('overwrite: ' + tmp_name + ' -> ' + str(inst_color))
cat_number = cat_number + 1
net.viz['cat_colors'] = cat_colors
|
def dict_cat(net, define_cat_colors=False)
|
make a dictionary of node-category associations
| 2.814317 | 2.763659 | 1.01833 |
'''
cluster category subset of data
'''
from .__init__ import Network
from copy import deepcopy
from . import calc_clust, run_filter
inst_keys = list(net.dat['node_info'][inst_rc].keys())
all_cats = [x for x in inst_keys if 'cat-' in x]
if len(all_cats) > 0:
for inst_name_cat in all_cats:
tmp_name = 'dict_' + inst_name_cat.replace('-', '_')
dict_cat = net.dat['node_info'][inst_rc][tmp_name]
unordered_cats = dict_cat.keys()
ordered_cats = order_categories(unordered_cats)
# this is the ordering of the columns based on their category, not
# including their clustering ordering within category
all_cat_orders = []
tmp_names_list = []
for inst_cat in ordered_cats:
inst_nodes = dict_cat[inst_cat]
tmp_names_list.extend(inst_nodes)
# cat_net = deepcopy(Network())
# cat_net.dat['mat'] = deepcopy(net.dat['mat'])
# cat_net.dat['nodes'] = deepcopy(net.dat['nodes'])
# cat_df = cat_net.dat_to_df()
# sub_df = {}
# if inst_rc == 'col':
# sub_df['mat'] = cat_df['mat'][inst_nodes]
# elif inst_rc == 'row':
# # need to transpose df
# cat_df['mat'] = cat_df['mat'].transpose()
# sub_df['mat'] = cat_df['mat'][inst_nodes]
# sub_df['mat'] = sub_df['mat'].transpose()
# # filter matrix before clustering
# ###################################
# threshold = 0.0001
# sub_df = run_filter.df_filter_row_sum(sub_df, threshold)
# sub_df = run_filter.df_filter_col_sum(sub_df, threshold)
# # load back to dat
# cat_net.df_to_dat(sub_df)
# cat_mat_shape = cat_net.dat['mat'].shape
# print('***************')
# try:
# if cat_mat_shape[0]>1 and cat_mat_shape[1] > 1 and all_are_numbers == False:
# calc_clust.cluster_row_and_col(cat_net, 'cos')
# inst_cat_order = cat_net.dat['node_info'][inst_rc]['clust']
# else:
# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
# except:
# inst_cat_order = list(range(len(cat_net.dat['nodes'][inst_rc])))
# prev_order_len = len(all_cat_orders)
# # add prev order length to the current order number
# inst_cat_order = [i + prev_order_len for i in inst_cat_order]
# all_cat_orders.extend(inst_cat_order)
# # generate ordered list of row/col names, which will be used to
# # assign the order to specific nodes
# names_clust_list = [x for (y, x) in sorted(zip(all_cat_orders,
# tmp_names_list))]
names_clust_list = tmp_names_list
# calc category-cluster order
final_order = []
for i in range(len(net.dat['nodes'][inst_rc])):
inst_node_name = net.dat['nodes'][inst_rc][i]
inst_node_num = names_clust_list.index(inst_node_name)
final_order.append(inst_node_num)
inst_index_cat = inst_name_cat.replace('-', '_') + '_index'
net.dat['node_info'][inst_rc][inst_index_cat] = final_order
|
def calc_cat_clust_order(net, inst_rc)
|
cluster category subset of data
| 3.039285 | 2.975288 | 1.021509 |
'''
If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list.
'''
no_titles = remove_titles(unordered_cats)
all_are_numbers = check_all_numbers(no_titles)
if all_are_numbers:
ordered_cats = order_cats_based_on_values(unordered_cats, no_titles)
else:
ordered_cats = sorted(unordered_cats)
return ordered_cats
|
def order_categories(unordered_cats)
|
If categories are strings, then simple ordering is fine.
If categories are values then I'll need to order based on their values.
The final ordering is given as the original categories (including titles) in an
ordered list.
| 6.115845 | 2.214345 | 2.761921 |
'''
Load file as a string.
'''
load_data.load_file_as_string(self, file_string, filename=filename)
|
def load_file_as_string(self, file_string, filename='')
|
Load file as a string.
| 4.571833 | 4.003247 | 1.142031 |
'''
Load Clustergrammer's dat format (saved as JSON).
'''
inst_dat = self.load_json_to_dict(filename)
load_data.load_data_to_net(self, inst_dat)
|
def load_data_file_to_net(self, filename)
|
Load Clustergrammer's dat format (saved as JSON).
| 10.455465 | 4.024589 | 2.597896 |
'''
The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``.
'''
initialize_net.viz(self)
make_clust_fun.make_clust(self, dist_type=dist_type, run_clustering=run_clustering,
dendro=dendro,
requested_views=views,
linkage_type=linkage_type,
sim_mat=sim_mat,
filter_sim=filter_sim,
calc_cat_pval=calc_cat_pval,
run_enrichr=run_enrichr,
enrichrgram=enrichrgram)
|
def cluster(self, dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=None, enrichrgram=None)
|
The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``.
| 4.494601 | 2.32679 | 1.931675 |
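Putting several of the Network methods in this section together, a hedged end-to-end sketch (the clustergrammer import path is an assumption; every call uses only signatures shown in these rows, including filter_N_top, normalize and write_json_to_file further below):

```python
import pandas as pd
from clustergrammer import Network   # assumed module/class location

df = pd.DataFrame([[1.0, 2.0, 0.5], [3.0, 4.0, 2.5], [0.1, 0.2, 0.3]],
                  index=['gene_a', 'gene_b', 'gene_c'],
                  columns=['sample_1', 'sample_2', 'sample_3'])

net = Network()
net.load_df(df)                                   # load a Pandas DataFrame
net.filter_N_top('row', 2, rank_type='var')       # keep the 2 most variable rows
net.normalize(axis='row', norm_type='zscore')     # row z-score
net.cluster(dist_type='cosine', linkage_type='average')
net.write_json_to_file('viz', 'mult_view.json')   # save the visualization JSON
```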
'''
Load Pandas DataFrame.
'''
# self.__init__()
self.reset()
df_dict = {}
df_dict['mat'] = deepcopy(df)
# always define category colors if applicable when loading a df
data_formats.df_to_dat(self, df_dict, define_cat_colors=True)
|
def load_df(self, df)
|
Load Pandas DataFrame.
| 14.096099 | 12.843867 | 1.097496 |
'''
Load Pandas DataFrame (will be deprecated).
'''
data_formats.df_to_dat(self, df, define_cat_colors)
|
def df_to_dat(self, df, define_cat_colors=False)
|
Load Pandas DataFrame (will be deprecated).
| 10.156842 | 4.452537 | 2.281136 |
'''
Generate a widget visualization using the widget. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end.
'''
if hasattr(self, 'widget_class') == True:
# run clustering if necessary
if len(self.viz['row_nodes']) == 0:
self.cluster()
self.widget_instance = self.widget_class(network = self.export_viz_to_widget(which_viz))
return self.widget_instance
else:
print('Can not make widget because Network has no attribute widget_class')
print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)')
|
def widget(self, which_viz='viz')
|
Generate a widget visualization using the widget. The export_viz_to_widget
method passes the visualization JSON to the instantiated widget, which is
returned and visualized on the front-end.
| 8.29629 | 4.326435 | 1.917581 |
'''
Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method.
'''
if hasattr(self, 'widget_instance') == True:
if self.widget_instance.mat_string != '':
tmp_net = deepcopy(Network())
df_string = self.widget_instance.mat_string
tmp_net.load_file_as_string(df_string)
df = tmp_net.export_df()
return df
else:
return self.export_df()
else:
if hasattr(self, 'widget_class') == True:
print('Please make the widget before exporting the widget DataFrame.')
print('Do this using the widget method: net.widget()')
else:
print('Can not make widget because Network has no attribute widget_class')
print('Please instantiate Network with clustergrammer_widget using: Network(clustergrammer_widget)')
|
def widget_df(self)
|
Export a DataFrame from the front-end visualization. For instance, a user
can filter to show only a single cluster using the dendrogram and then
get a dataframe of this cluster using the widget_df method.
| 7.456887 | 4.46862 | 1.668723 |
'''
Save dat or viz as a JSON to file.
'''
export_data.write_json_to_file(self, net_type, filename, indent)
|
def write_json_to_file(self, net_type, filename, indent='no-indent')
|
Save dat or viz as a JSON to file.
| 9.580324 | 3.421215 | 2.80027 |
'''
Filter a network's rows or columns based on the sum across rows or columns.
'''
inst_df = self.dat_to_df()
if inst_rc == 'row':
inst_df = run_filter.df_filter_row_sum(inst_df, threshold, take_abs)
elif inst_rc == 'col':
inst_df = run_filter.df_filter_col_sum(inst_df, threshold, take_abs)
self.df_to_dat(inst_df)
|
def filter_sum(self, inst_rc, threshold, take_abs=True)
|
Filter a network's rows or columns based on the sum across rows or columns.
| 3.15746 | 2.483143 | 1.271558 |
'''
Filter the matrix rows or columns based on sum/variance, and only keep the top
N.
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_N_top(inst_rc, inst_df, N_top, rank_type)
self.df_to_dat(inst_df)
|
def filter_N_top(self, inst_rc, N_top, rank_type='sum')
|
Filter the matrix rows or columns based on sum/variance, and only keep the top
N.
| 5.721913 | 3.169202 | 1.805475 |
'''
Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value).
'''
inst_df = self.dat_to_df()
inst_df = run_filter.filter_threshold(inst_df, inst_rc, threshold,
num_occur)
self.df_to_dat(inst_df)
|
def filter_threshold(self, inst_rc, threshold, num_occur=1)
|
Filter the matrix rows or columns based on num_occur values being above a
threshold (in absolute value).
| 6.373182 | 3.442139 | 1.851518 |
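For instance (threshold and occurrence count chosen arbitrarily):

# keep rows with at least 3 values whose absolute value exceeds 1.0
net.filter_threshold('row', threshold=1.0, num_occur=3)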
'''
Filter the matrix rows or columns based on their category. cat_index is the index of the category; the first category has index=1.
'''
run_filter.filter_cat(self, axis, cat_index, cat_name)
|
def filter_cat(self, axis, cat_index, cat_name)
|
Filter the matrix rows or columns based on their category. cat_index is the index of the category; the first category has index=1.
| 7.433342 | 2.904372 | 2.559363 |
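A hedged example call; the category name is hypothetical and must match a category already attached to the rows:

net.filter_cat('row', 1, 'Gene Type: interesting')  # cat_index=1 is the first category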
'''
Trim values at input thresholds using the pandas clip function.
'''
df = self.export_df()
df = df.clip(lower=lower, upper=upper)
self.load_df(df)
|
def clip(self, lower=None, upper=None)
|
Trim values at input thresholds using the pandas clip function.
| 8.466238 | 3.902894 | 2.169221 |
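For example:

net.clip(lower=-5, upper=5)  # clamp extreme values into the range [-5, 5]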
'''
Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object).
'''
normalize_fun.run_norm(self, df, norm_type, axis, keep_orig)
|
def normalize(self, df=None, norm_type='zscore', axis='row', keep_orig=False)
|
Normalize the matrix rows or columns using Z-score (zscore) or Quantile Normalization (qn). Users can optionally pass in a DataFrame to be normalized (and this will be incorporated into the Network object).
| 10.732241 | 2.51876 | 4.260922 |
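Two illustrative calls, using the zscore and qn options named in the docstring (my_df is a hypothetical external DataFrame):

net.normalize(axis='row', norm_type='zscore')        # Z-score each row of the loaded matrix
net.normalize(df=my_df, norm_type='qn', axis='col')  # quantile-normalize the columns of my_df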
'''
Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object).
'''
return downsample_fun.main(self, df, ds_type, axis, num_samples, random_state)
|
def downsample(self, df=None, ds_type='kmeans', axis='row', num_samples=100, random_state=1000)
|
Downsample the matrix rows or columns (currently supporting kmeans only). Users can optionally pass in a DataFrame to be downsampled (and this will be incorporated into the network object).
| 11.35239 | 2.695847 | 4.211066 |
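A sketch of k-means downsampling of columns (e.g. single cells); the return value is whatever downsample_fun.main produces:

ds_data = net.downsample(ds_type='kmeans', axis='col', num_samples=100, random_state=1000)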
'''
Return random sample of matrix.
'''
if df is None:
df = self.dat_to_df()
if axis == 'row':
axis = 0
if axis == 'col':
axis = 1
df = self.export_df()
df = df.sample(n=num_samples, replace=replace, weights=weights, random_state=random_state, axis=axis)
self.load_df(df)
|
def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row')
|
Return random sample of matrix.
| 3.369272 | 2.992014 | 1.126088 |
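For example:

net.random_sample(num_samples=500, axis='col', random_state=42)  # keep 500 random columns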
'''
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
'''
for inst_data in cat_data:
categories.add_cats(self, axis, inst_data)
|
def add_cats(self, axis, cat_data)
|
Add categories to rows or columns using cat_data array of objects. Each object in cat_data is a dictionary with one key (category title) and value (rows/column names) that have this category. Categories will be added onto the existing categories and will be added in the order of the objects in the array.
Example ``cat_data``::
[
{
"title": "First Category",
"cats": {
"true": [
"ROS1",
"AAK1"
]
}
},
{
"title": "Second Category",
"cats": {
"something": [
"PDK4"
]
}
}
]
| 7.195297 | 1.500161 | 4.796349 |
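Reusing the example cat_data from the docstring (the gene names only take effect if they match row names in the loaded matrix):

cat_data = [
    {'title': 'First Category', 'cats': {'true': ['ROS1', 'AAK1']}},
    {'title': 'Second Category', 'cats': {'something': ['PDK4']}},
]
net.add_cats('row', cat_data)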
'''
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
'''
df = self.export_df()
df, bar_info = enr_fun.add_enrichr_cats(df, axis, lib)
self.load_df(df)
self.dat['enrichrgram_lib'] = lib
self.dat['row_cat_bars'] = bar_info
|
def enrichrgram(self, lib, axis='row')
|
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
| 5.336521 | 1.569959 | 3.399146 |
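An illustrative call using one of the listed libraries:

net.enrichrgram('KEA_2015')  # add kinase-enrichment categories to the gene rows
net.cluster()                # re-cluster so the new row categories appear in the viz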
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = pd.Series(ini_genes).value_counts()
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
dup_index = {}
genes = []
for inst_row in ini_genes:
# add index to non-unique genes
if inst_row in duplicate_genes:
# calc non-unique index
if inst_row not in dup_index:
dup_index[inst_row] = 1
else:
dup_index[inst_row] = dup_index[inst_row] + 1
new_row = inst_row + '_' + str(dup_index[inst_row])
else:
new_row = inst_row
genes.append(new_row)
# barcodes
filename = inst_path + 'barcodes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
cell_barcodes = []
for inst_bc in lines:
inst_bc = inst_bc.strip().split('\t')
# remove dash from barcodes if necessary
if '-' in inst_bc[0]:
inst_bc[0] = inst_bc[0].split('-')[0]
cell_barcodes.append(inst_bc[0])
# parse tuples if necessary
try:
cell_barcodes = [make_tuple(x) for x in cell_barcodes]
except:
pass
try:
genes = [make_tuple(x) for x in genes]
except:
pass
# make dataframe
df = pd.DataFrame(mat, index=genes, columns=cell_barcodes)
return df
|
def load_gene_exp_to_df(inst_path)
|
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
| 2.245439 | 2.134105 | 1.052169 |
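A sketch of calling this loader on a 10x output directory; the path is illustrative and must end with a trailing slash, since the filenames are appended directly:

df = load_gene_exp_to_df('filtered_gene_bc_matrices/hg19/')
print(df.shape)  # genes x cell barcodes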
'''
Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 in the first category
'''
cols = df.columns.tolist()
if isinstance(precalc_dist, bool):
# compute distance between rows (transpose to get cols as rows)
dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
else:
dist_arr = precalc_dist
# generate sample names with categories
sample_combos = list(combinations(range(df.shape[1]),2))
sample_names = [str(ind) + '_same' if cols[x[0]][cat_index] == cols[x[1]][cat_index] else str(ind) + '_different' for ind, x in enumerate(sample_combos)]
ser_dist = pd.Series(data=dist_arr, index=sample_names)
# find same-cat sample comparisons
same_cat = [x for x in sample_names if x.split('_')[1] == 'same']
# find diff-cat sample comparisons
diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']
# make series of same and diff category sample comparisons
ser_same = ser_dist[same_cat]
ser_same.name = 'Same Category'
ser_diff = ser_dist[diff_cat]
ser_diff.name = 'Different Category'
sim_dict = {}
roc_data = {}
sim_data = {}
sim_dict['same'] = ser_same
sim_dict['diff'] = ser_diff
pval_dict = {}
ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same, equal_var=equal_var)
ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same)
if calc_roc:
# calc AUC
true_index = list(np.ones(sim_dict['same'].shape[0]))
false_index = list(np.zeros(sim_dict['diff'].shape[0]))
y_true = true_index + false_index
true_val = sim_dict['same'].values.tolist()
false_val = sim_dict['diff'].values.tolist()
y_score = true_val + false_val
fpr, tpr, thresholds = roc_curve(y_true, y_score)
inst_auc = auc(fpr, tpr)
if plot_roc:
plt.figure()
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.figure(figsize=(10,10))
print('AUC', inst_auc)
roc_data['true'] = y_true
roc_data['score'] = y_score
roc_data['fpr'] = fpr
roc_data['tpr'] = tpr
roc_data['thresholds'] = thresholds
roc_data['auc'] = inst_auc
sim_data['sim_dict'] = sim_dict
sim_data['pval_dict'] = pval_dict
sim_data['roc_data'] = roc_data
return sim_data
|
def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine',
equal_var=False, plot_roc=True,
precalc_dist=False, calc_roc=True)
|
Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category.
| 2.313072 | 2.156646 | 1.072532 |
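A hedged example, assuming df has tuple column labels whose element at cat_index holds the category string:

sim_data = net.sim_same_and_diff_category_samples(df, cat_index=1, dist_type='cosine', plot_roc=False)
print(sim_data['pval_dict'])  # t-test and Mann-Whitney p-values comparing the two distributions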
''' Generate signatures for column categories '''
df_t = df_ini.transpose()
# remove columns with constant values
df_t = df_t.loc[:, (df_t != df_t.iloc[0]).any()]
df = self.row_tuple_to_multiindex(df_t)
cell_types = sorted(list(set(df.index.get_level_values(category_level).tolist())))
keep_genes = []
keep_genes_dict = {}
gene_pval_dict = {}
all_fold_info = {}
for inst_ct in cell_types:
inst_ct_mat = df.xs(key=inst_ct, level=category_level)
inst_other_mat = df.drop(inst_ct, level=category_level)
# save mean values and fold change
fold_info = {}
fold_info['cluster_mean'] = inst_ct_mat.mean()
fold_info['other_mean'] = inst_other_mat.mean()
fold_info['log2_fold'] = fold_info['cluster_mean']/fold_info['other_mean']
fold_info['log2_fold'] = fold_info['log2_fold'].apply(np.log2)
all_fold_info[inst_ct] = fold_info
inst_stats, inst_pvals = ttest_ind(inst_ct_mat, inst_other_mat, axis=0, equal_var=equal_var)
ser_pval = pd.Series(data=inst_pvals, index=df.columns.tolist()).sort_values()
if num_top_dims is False:
ser_pval_keep = ser_pval[ser_pval < pval_cutoff]
else:
ser_pval_keep = ser_pval[:num_top_dims]
gene_pval_dict[inst_ct] = ser_pval_keep
inst_keep = ser_pval_keep.index.tolist()
keep_genes.extend(inst_keep)
keep_genes_dict[inst_ct] = inst_keep
keep_genes = sorted(list(set(keep_genes)))
df_gbm = df.groupby(level=category_level).mean().transpose()
cols = df_gbm.columns.tolist()
new_cols = []
for inst_col in cols:
new_col = (inst_col, category_level + ': ' + inst_col)
new_cols.append(new_col)
df_gbm.columns = new_cols
df_sig = df_gbm.loc[keep_genes]
if len(keep_genes) == 0 and verbose:
print('found no informative dimensions')
df_gene_pval = pd.concat(gene_pval_dict, axis=1, sort=False)
return df_sig, keep_genes_dict, df_gene_pval, all_fold_info
|
def generate_signatures(self, df_ini, category_level, pval_cutoff=0.05,
num_top_dims=False, verbose=True, equal_var=False)
|
Generate signatures for column categories
| 2.431461 | 2.429037 | 1.000998 |
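An illustrative call; 'Cell Type' is a hypothetical category level name encoded in the tuple column labels:

df_sig, keep_genes_dict, df_gene_pval, fold_info = net.generate_signatures(df, 'Cell Type', pval_cutoff=0.05)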
''' Predict category using signature '''
keep_rows = df_sig_ini.index.tolist()
data_rows = df_data_ini.index.tolist()
common_rows = list(set(data_rows).intersection(keep_rows))
df_data = deepcopy(df_data_ini.loc[common_rows])
df_sig = deepcopy(df_sig_ini.loc[common_rows])
# calculate sim_mat of df_data and df_sig
cell_types = df_sig.columns.tolist()
barcodes = df_data.columns.tolist()
sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()
# get the top column value (most similar signature)
df_sim_top = df_sim.idxmax(axis=1)
# get the maximum similarity of a cell to a cell type definition
max_sim = df_sim.max(axis=1)
unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()
# assign unknown cells (need category of same name)
df_sim_top[unknown_cells] = 'Unknown'
# add predicted category name to top list
top_list = df_sim_top.values.tolist()
top_list = [ predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]
# add cell type category to input data
df_cat = deepcopy(df_data)
cols = df_cat.columns.tolist()
new_cols = []
# check whether the columns have the true category available
has_truth = False
if type(cols[0]) is tuple:
has_truth = True
if has_truth:
new_cols = [tuple(list(a) + [b]) for a,b in zip(cols, top_list)]
else:
new_cols = [tuple([a] + [b]) for a,b in zip(cols, top_list)]
# transfer new categories
df_cat.columns = new_cols
# keep track of true and predicted labels
y_info = {}
y_info['true'] = []
y_info['pred'] = []
if has_truth:
y_info['true'] = [x[truth_level].split(': ')[1] for x in cols]
y_info['pred'] = [x.split(': ')[1] for x in top_list]
return df_cat, df_sim.transpose(), y_info
|
def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category',
truth_level=1, unknown_thresh=-1)
|
Predict category using signature
| 2.951412 | 2.955857 | 0.998496 |
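A sketch of predicting categories for new data from the signatures generated above (df_new is a hypothetical genes-by-cells DataFrame):

df_cat, df_sim, y_info = net.predict_cats_from_sigs(df_new, df_sig, predict_level='Predict Cell Type', truth_level=1)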