code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
# FIXME: What are we guarding for here? We don't mention that None is
# allowed as a value fo cmd.
if cmd:
if cmd.prehook:
await cmd.prehook(ui=self, dbm=self.dbman, cmd=cmd)
try:
if asyncio.iscoroutinefunction(cmd.apply):
await cmd.apply(self)
else:
cmd.apply(self)
except Exception as e:
self._error_handler(e)
else:
if cmd.posthook:
logging.info('calling post-hook')
await cmd.posthook(ui=self, dbm=self.dbman, cmd=cmd) | async def apply_command(self, cmd) | applies a command
This calls the pre and post hooks attached to the command,
as well as :meth:`cmd.apply`.
:param cmd: an applicable command
:type cmd: :class:`~alot.commands.Command` | 4.589528 | 4.408249 | 1.041123 |
# it is a SIGINT ?
if signum == signal.SIGINT:
logging.info('shut down cleanly')
asyncio.ensure_future(self.apply_command(globals.ExitCommand()))
elif signum == signal.SIGUSR1:
if isinstance(self.current_buffer, SearchBuffer):
self.current_buffer.rebuild()
self.update() | def handle_signal(self, signum, frame) | handles UNIX signals
This function currently just handles SIGUSR1. It could be extended to
handle more
:param signum: The signal number (see man 7 signal)
:param frame: The execution frame
(https://docs.python.org/2/reference/datamodel.html#frame-objects) | 6.999479 | 7.80958 | 0.896268 |
size = settings.get('history_size')
self._save_history_to_file(self.commandprompthistory,
self._cmd_hist_file, size=size)
self._save_history_to_file(self.senderhistory, self._sender_hist_file,
size=size)
self._save_history_to_file(self.recipienthistory,
self._recipients_hist_file, size=size) | def cleanup(self) | Do the final clean up before shutting down. | 4.199505 | 3.934913 | 1.067242 |
if size == 0:
return []
if os.path.exists(path):
with codecs.open(path, 'r', encoding='utf-8') as histfile:
lines = [line.rstrip('\n') for line in histfile]
if size > 0:
lines = lines[-size:]
return lines
else:
return [] | def _load_history_from_file(path, size=-1) | Load a history list from a file and split it into lines.
:param path: the path to the file that should be loaded
:type path: str
:param size: the number of lines to load (0 means no lines, < 0 means
all lines)
:type size: int
:returns: a list of history items (the lines of the file)
:rtype: list(str) | 2.259995 | 2.322355 | 0.973148 |
if size == 0:
return
if size > 0:
history = history[-size:]
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
# Write linewise to avoid building a large string in menory.
with codecs.open(path, 'w', encoding='utf-8') as histfile:
for line in history:
histfile.write(line)
histfile.write('\n') | def _save_history_to_file(history, path, size=-1) | Save a history list to a file for later loading (possibly in another
session).
:param history: the history list to save
:type history: list(str)
:param path: the path to the file where to save the history
:param size: the number of lines to save (0 means no lines, < 0 means
all lines)
:type size: int
:type path: str
:returns: None | 3.382025 | 3.52141 | 0.960418 |
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-r', '--read-only', action='store_true',
help='open notmuch database in read-only mode')
parser.add_argument('-c', '--config', metavar='FILENAME',
action=cargparse.ValidatedStoreAction,
validator=cargparse.require_file,
help='configuration file')
parser.add_argument('-n', '--notmuch-config', metavar='FILENAME',
default=os.environ.get(
'NOTMUCH_CONFIG',
os.path.expanduser('~/.notmuch-config')),
action=cargparse.ValidatedStoreAction,
validator=cargparse.require_file,
help='notmuch configuration file')
parser.add_argument('-C', '--colour-mode', metavar='COLOURS',
choices=(1, 16, 256), type=int,
help='number of colours to use')
parser.add_argument('-p', '--mailindex-path', metavar='PATH',
action=cargparse.ValidatedStoreAction,
validator=cargparse.require_dir,
help='path to notmuch index')
parser.add_argument('-d', '--debug-level', metavar='LEVEL', default='info',
choices=('debug', 'info', 'warning', 'error'),
help='debug level [default: %(default)s]')
parser.add_argument('-l', '--logfile', metavar='FILENAME',
default='/dev/null',
action=cargparse.ValidatedStoreAction,
validator=cargparse.optional_file_like,
help='log file [default: %(default)s]')
parser.add_argument('-h', '--help', action='help',
help='display this help and exit')
parser.add_argument('-v', '--version', action='version',
version=alot.__version__,
help='output version information and exit')
# We will handle the subcommands in a separate run of argparse as argparse
# does not support optional subcommands until now.
parser.add_argument('command', nargs=argparse.REMAINDER,
help='possible subcommands are {}'.format(
', '.join(_SUBCOMMANDS)))
options = parser.parse_args()
if options.command:
# We have a command after the initial options so we also parse that.
# But we just use the parser that is already defined for the internal
# command that will back this subcommand.
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand')
for subcommand in _SUBCOMMANDS:
subparsers.add_parser(subcommand,
parents=[COMMANDS['global'][subcommand][1]])
command = parser.parse_args(options.command)
else:
command = None
return options, command | def parser() | Parse command line arguments, validate them, and return them. | 2.917124 | 2.874839 | 1.014709 |
options, command = parser()
# logging
root_logger = logging.getLogger()
for log_handler in root_logger.handlers:
root_logger.removeHandler(log_handler)
root_logger = None
numeric_loglevel = getattr(logging, options.debug_level.upper(), None)
logformat = '%(levelname)s:%(module)s:%(message)s'
logging.basicConfig(level=numeric_loglevel, filename=options.logfile,
filemode='w', format=logformat)
# locate alot config files
cpath = options.config
if options.config is None:
xdg_dir = get_xdg_env('XDG_CONFIG_HOME',
os.path.expanduser('~/.config'))
alotconfig = os.path.join(xdg_dir, 'alot', 'config')
if os.path.exists(alotconfig):
cpath = alotconfig
try:
settings.read_config(cpath)
settings.read_notmuch_config(options.notmuch_config)
except (ConfigError, OSError, IOError) as e:
print('Error when parsing a config file. '
'See log for potential details.')
sys.exit(e)
# store options given by config swiches to the settingsManager:
if options.colour_mode:
settings.set('colourmode', options.colour_mode)
# get ourselves a database manager
indexpath = settings.get_notmuch_setting('database', 'path')
indexpath = options.mailindex_path or indexpath
dbman = DBManager(path=indexpath, ro=options.read_only)
# determine what to do
if command is None:
try:
cmdstring = settings.get('initial_command')
except CommandParseError as err:
sys.exit(err)
elif command.subcommand in _SUBCOMMANDS:
cmdstring = ' '.join(options.command)
# set up and start interface
UI(dbman, cmdstring)
# run the exit hook
exit_hook = settings.get_hook('exit')
if exit_hook is not None:
exit_hook() | def main() | The main entry point to alot. It parses the command line and prepares
for the user interface main loop to run. | 4.50148 | 4.349855 | 1.034857 |
encrypt_keys = []
for header in ('To', 'Cc'):
if header not in envelope.headers:
continue
for recipient in envelope.headers[header][0].split(','):
if not recipient:
continue
match = re.search("<(.*@.*)>", recipient)
if match:
recipient = match.group(1)
encrypt_keys.append(recipient)
logging.debug("encryption keys: " + str(encrypt_keys))
keys = await _get_keys(ui, encrypt_keys, block_error=block_error,
signed_only=signed_only)
if keys:
envelope.encrypt_keys = keys
envelope.encrypt = True
if 'From' in envelope.headers:
try:
if envelope.account is None:
envelope.account = settings.account_matching_address(
envelope['From'])
acc = envelope.account
if acc.encrypt_to_self:
if acc.gpg_key:
logging.debug('encrypt to self: %s', acc.gpg_key.fpr)
envelope.encrypt_keys[acc.gpg_key.fpr] = acc.gpg_key
else:
logging.debug('encrypt to self: no gpg_key in account')
except NoMatchingAccount:
logging.debug('encrypt to self: no account found')
else:
envelope.encrypt = False | async def update_keys(ui, envelope, block_error=False, signed_only=False) | Find and set the encryption keys in an envolope.
:param ui: the main user interface object
:type ui: alot.ui.UI
:param envolope: the envolope buffer object
:type envolope: alot.buffers.EnvelopeBuffer
:param block_error: wether error messages for the user should expire
automatically or block the ui
:type block_error: bool
:param signed_only: only use keys whose uid is signed (trusted to belong
to the key)
:type signed_only: bool | 3.246516 | 3.357555 | 0.966928 |
keys = {}
for keyid in encrypt_keyids:
try:
key = crypto.get_key(keyid, validate=True, encrypt=True,
signed_only=signed_only)
except GPGProblem as e:
if e.code == GPGCode.AMBIGUOUS_NAME:
tmp_choices = ['{} ({})'.format(k.uids[0].uid, k.fpr) for k in
crypto.list_keys(hint=keyid)]
choices = {str(i): t for i, t in enumerate(tmp_choices, 1)}
keys_to_return = {str(i): t for i, t in enumerate([k for k in
crypto.list_keys(hint=keyid)], 1)}
choosen_key = await ui.choice("ambiguous keyid! Which " +
"key do you want to use?",
choices=choices,
choices_to_return=keys_to_return)
if choosen_key:
keys[choosen_key.fpr] = choosen_key
continue
else:
ui.notify(str(e), priority='error', block=block_error)
continue
keys[key.fpr] = key
return keys | async def _get_keys(ui, encrypt_keyids, block_error=False, signed_only=False) | Get several keys from the GPG keyring. The keys are selected by keyid
and are checked if they can be used for encryption.
:param ui: the main user interface object
:type ui: alot.ui.UI
:param encrypt_keyids: the key ids of the keys to get
:type encrypt_keyids: list(str)
:param block_error: wether error messages for the user should expire
automatically or block the ui
:type block_error: bool
:param signed_only: only return keys whose uid is signed (trusted to belong
to the key)
:type signed_only: bool
:returns: the available keys indexed by their OpenPGP fingerprint
:rtype: dict(str->gpg key object) | 3.782822 | 3.726058 | 1.015234 |
assert isinstance(address, str), 'address must be str'
username, domainname = address.split('@')
return cls(username, domainname, case_sensitive=case_sensitive) | def from_string(cls, address, case_sensitive=False) | Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account` | 3.480404 | 3.737183 | 0.931291 |
if isinstance(other, str):
try:
ouser, odomain = other.split('@')
except ValueError:
ouser, odomain = '', ''
else:
ouser = other.username
odomain = other.domainname
if not self.case_sensitive:
ouser = ouser.lower()
username = self.username.lower()
else:
username = self.username
return (comparitor(username, ouser) and
comparitor(self.domainname.lower(), odomain.lower())) | def __cmp(self, other, comparitor) | Shared helper for rich comparison operators.
This allows the comparison operators to be relatively simple and share
the complex logic.
If the username is not considered case sensitive then lower the
username of both self and the other, and handle that the other can be
either another :class:`~alot.account.Address`, or a `str` instance.
:param other: The other address to compare against
:type other: str or ~alot.account.Address
:param callable comparitor: A function with the a signature
(str, str) -> bool that will compare the two instance.
The intention is to use functions from the operator module. | 2.759198 | 2.575578 | 1.071293 |
if self.address == address:
return True
for alias in self.aliases:
if alias == address:
return True
if self._alias_regexp and self._alias_regexp.match(address):
return True
return False | def matches_address(self, address) | returns whether this account knows about an email address
:param str address: address to look up
:rtype: bool | 2.927958 | 3.052577 | 0.959176 |
if not isinstance(mbx, mailbox.Mailbox):
logging.debug('Not a mailbox')
return False
mbx.lock()
if isinstance(mbx, mailbox.Maildir):
logging.debug('Maildir')
msg = mailbox.MaildirMessage(mail)
msg.set_flags('S')
else:
logging.debug('no Maildir')
msg = mailbox.Message(mail)
try:
message_id = mbx.add(msg)
mbx.flush()
mbx.unlock()
logging.debug('got mailbox msg id : %s', message_id)
except Exception as e:
raise StoreMailError(e)
path = None
# add new Maildir message to index and add tags
if isinstance(mbx, mailbox.Maildir):
# this is a dirty hack to get the path to the newly added file
# I wish the mailbox module were more helpful...
plist = glob.glob1(os.path.join(mbx._path, 'new'),
message_id + '*')
if plist:
path = os.path.join(mbx._path, 'new', plist[0])
logging.debug('path of saved msg: %s', path)
return path | def store_mail(mbx, mail) | stores given mail in mailbox. If mailbox is maildir, set the S-flag and
return path to newly added mail. Oherwise this will return `None`.
:param mbx: mailbox to use
:type mbx: :class:`mailbox.Mailbox`
:param mail: the mail to store
:type mail: :class:`email.message.Message` or str
:returns: absolute path of mail-file for Maildir or None if mail was
successfully stored
:rtype: str or None
:raises: StoreMailError | 4.031623 | 3.757093 | 1.07307 |
if self.sent_box is not None:
return self.store_mail(self.sent_box, mail) | def store_sent_mail(self, mail) | stores mail (:class:`email.message.Message` or str) in send-store if
:attr:`sent_box` is set. | 5.361234 | 3.436546 | 1.560065 |
if self.draft_box is not None:
return self.store_mail(self.draft_box, mail) | def store_draft_mail(self, mail) | stores mail (:class:`email.message.Message` or str) as draft if
:attr:`draft_box` is set. | 4.79844 | 3.550005 | 1.351672 |
cmdlist = split_commandstring(self.cmd)
try:
# make sure self.mail is a string
out, err, code = await call_cmd_async(cmdlist, stdin=str(mail))
if code != 0:
msg = 'The sendmail command {} returned with code {}{}'.format(
self.cmd, code, ':\n' + err.strip() if err else '.')
raise Exception(msg)
except Exception as e:
logging.error(str(e))
raise SendingMailFailed(str(e))
logging.info('sent mail successfully')
logging.info(out) | async def send_mail(self, mail) | Pipe the given mail to the configured sendmail command. Display a
short message on success or a notification on error.
:param mail: the mail to send out
:type mail: :class:`email.message.Message` or string
:raises: class:`SendingMailFailed` if sending failes | 5.081961 | 4.648539 | 1.093238 |
if not thread:
thread = self._dbman._get_notmuch_thread(self._id)
self._total_messages = thread.get_total_messages()
self._notmuch_authors_string = thread.get_authors()
subject_type = settings.get('thread_subject')
if subject_type == 'notmuch':
subject = thread.get_subject()
elif subject_type == 'oldest':
try:
first_msg = list(thread.get_toplevel_messages())[0]
subject = first_msg.get_header('subject')
except IndexError:
subject = ''
self._subject = subject
self._authors = None
ts = thread.get_oldest_date()
try:
self._oldest_date = datetime.fromtimestamp(ts)
except ValueError: # year is out of range
self._oldest_date = None
try:
timestamp = thread.get_newest_date()
self._newest_date = datetime.fromtimestamp(timestamp)
except ValueError: # year is out of range
self._newest_date = None
self._tags = {t for t in thread.get_tags()}
self._messages = {} # this maps messages to its children
self._toplevel_messages = [] | def refresh(self, thread=None) | refresh thread metadata from the index | 3.567791 | 3.477476 | 1.025971 |
tags = set(list(self._tags))
if intersection:
for m in self.get_messages().keys():
tags = tags.intersection(set(m.get_tags()))
return tags | def get_tags(self, intersection=False) | returns tagsstrings attached to this thread
:param intersection: return tags present in all contained messages
instead of in at least one (union)
:type intersection: bool
:rtype: set of str | 4.235623 | 3.700846 | 1.144501 |
def myafterwards():
if remove_rest:
self._tags = set(tags)
else:
self._tags = self._tags.union(tags)
if callable(afterwards):
afterwards()
self._dbman.tag('thread:' + self._id, tags, afterwards=myafterwards,
remove_rest=remove_rest) | def add_tags(self, tags, afterwards=None, remove_rest=False) | add `tags` to all messages in this thread
.. note::
This only adds the requested operation to this objects
:class:`DBManager's <alot.db.DBManager>` write queue.
You need to call :meth:`DBManager.flush <alot.db.DBManager.flush>`
to actually write out.
:param tags: a list of tags to be added
:type tags: list of str
:param afterwards: callback that gets called after successful
application of this tagging operation
:type afterwards: callable
:param remove_rest: remove all other tags
:type remove_rest: bool | 5.671422 | 5.253062 | 1.079641 |
rmtags = set(tags).intersection(self._tags)
if rmtags:
def myafterwards():
self._tags = self._tags.difference(tags)
if callable(afterwards):
afterwards()
self._dbman.untag('thread:' + self._id, tags, myafterwards)
self._tags = self._tags.difference(rmtags) | def remove_tags(self, tags, afterwards=None) | remove `tags` (list of str) from all messages in this thread
.. note::
This only adds the requested operation to this objects
:class:`DBManager's <alot.db.DBManager>` write queue.
You need to call :meth:`DBManager.flush <alot.db.DBManager.flush>`
to actually write out.
:param tags: a list of tags to be added
:type tags: list of str
:param afterwards: callback that gets called after successful
application of this tagging operation
:type afterwards: callable | 5.182176 | 4.670341 | 1.109593 |
if self._authors is None:
# Sort messages with date first (by date ascending), and those
# without a date last.
msgs = sorted(self.get_messages().keys(),
key=lambda m: m.get_date() or datetime.max)
orderby = settings.get('thread_authors_order_by')
self._authors = []
if orderby == 'latest_message':
for m in msgs:
pair = m.get_author()
if pair in self._authors:
self._authors.remove(pair)
self._authors.append(pair)
else: # i.e. first_message
for m in msgs:
pair = m.get_author()
if pair not in self._authors:
self._authors.append(pair)
return self._authors | def get_authors(self) | returns a list of authors (name, addr) of the messages.
The authors are ordered by msg date and unique (by name/addr).
:rtype: list of (str, str) | 3.751356 | 3.709791 | 1.011204 |
if replace_own is None:
replace_own = settings.get('thread_authors_replace_me')
if replace_own:
if own_accts is None:
own_accts = settings.get_accounts()
authorslist = []
for aname, aaddress in self.get_authors():
for account in own_accts:
if account.matches_address(aaddress):
aname = settings.get('thread_authors_me')
break
if not aname:
aname = aaddress
if aname not in authorslist:
authorslist.append(aname)
return ', '.join(authorslist)
else:
return self._notmuch_authors_string | def get_authors_string(self, own_accts=None, replace_own=None) | returns a string of comma-separated authors
Depending on settings, it will substitute "me" for author name if
address is user's own.
:param own_accts: list of own accounts to replace
:type own_accts: list of :class:`Account`
:param replace_own: whether or not to actually do replacement
:type replace_own: bool
:rtype: str | 3.19508 | 2.904694 | 1.099971 |
if not self._messages: # if not already cached
query = self._dbman.query('thread:' + self._id)
thread = next(query.search_threads())
def accumulate(acc, msg):
M = Message(self._dbman, msg, thread=self)
acc[M] = []
r = msg.get_replies()
if r is not None:
for m in r:
acc[M].append(accumulate(acc, m))
return M
self._messages = {}
for m in thread.get_toplevel_messages():
self._toplevel_messages.append(accumulate(self._messages, m))
return self._messages | def get_messages(self) | returns all messages in this thread as dict mapping all contained
messages to their direct responses.
:rtype: dict mapping :class:`~alot.db.message.Message` to a list of
:class:`~alot.db.message.Message`. | 4.722371 | 4.43014 | 1.065964 |
mid = msg.get_message_id()
msg_hash = self.get_messages()
for m in msg_hash.keys():
if m.get_message_id() == mid:
return msg_hash[m]
return None | def get_replies_to(self, msg) | returns all replies to the given message contained in this thread.
:param msg: parent message to look up
:type msg: :class:`~alot.db.message.Message`
:returns: list of :class:`~alot.db.message.Message` or `None` | 3.613748 | 3.884482 | 0.930304 |
thread_query = 'thread:{tid} AND {subquery}'.format(tid=self._id,
subquery=query)
num_matches = self._dbman.count_messages(thread_query)
return num_matches > 0 | def matches(self, query) | Check if this thread matches the given notmuch query.
:param query: The query to check against
:type query: string
:returns: True if this thread matches the given query, False otherwise
:rtype: bool | 7.636497 | 7.194108 | 1.061493 |
if cmdname in COMMANDS[mode]:
return COMMANDS[mode][cmdname]
elif cmdname in COMMANDS['global']:
return COMMANDS['global'][cmdname]
else:
return None, None, None | def lookup_command(cmdname, mode) | returns commandclass, argparser and forced parameters used to construct
a command for `cmdname` when called in `mode`.
:param cmdname: name of the command to look up
:type cmdname: str
:param mode: mode identifier
:type mode: str
:rtype: (:class:`Command`, :class:`~argparse.ArgumentParser`,
dict(str->dict)) | 2.424243 | 2.616003 | 0.926697 |
# split commandname and parameters
if not cmdline:
return None
logging.debug('mode:%s got commandline "%s"', mode, cmdline)
# allow to shellescape without a space after '!'
if cmdline.startswith('!'):
cmdline = 'shellescape \'%s\'' % cmdline[1:]
cmdline = re.sub(r'"(.*)"', r'"\\"\1\\""', cmdline)
try:
args = split_commandstring(cmdline)
except ValueError as e:
raise CommandParseError(str(e))
args = [string_decode(x, 'utf-8') for x in args]
logging.debug('ARGS: %s', args)
cmdname = args[0]
args = args[1:]
# unfold aliases
# TODO: read from settingsmanager
# get class, argparser and forced parameter
(cmdclass, parser, forcedparms) = lookup_command(cmdname, mode)
if cmdclass is None:
msg = 'unknown command: %s' % cmdname
logging.debug(msg)
raise CommandParseError(msg)
parms = vars(parser.parse_args(args))
parms.update(forcedparms)
logging.debug('cmd parms %s', parms)
# create Command
cmd = cmdclass(**parms)
# set pre and post command hooks
get_hook = settings.get_hook
cmd.prehook = get_hook('pre_%s_%s' % (mode, cmdname)) or \
get_hook('pre_global_%s' % cmdname)
cmd.posthook = get_hook('post_%s_%s' % (mode, cmdname)) or \
get_hook('post_global_%s' % cmdname)
return cmd | def commandfactory(cmdline, mode='global') | parses `cmdline` and constructs a :class:`Command`.
:param cmdline: command line to interpret
:type cmdline: str
:param mode: mode identifier
:type mode: str | 3.631303 | 3.642328 | 0.996973 |
cols, _ = self.taglist.get_focus()
tagwidget = cols.original_widget.get_focus()
return tagwidget.tag | def get_selected_tag(self) | returns selected tagstring | 12.843421 | 11.57061 | 1.110004 |
out = []
for match in re.finditer(ATTR_RE, chart):
if match.group('whitespace'):
out.append(match.group('whitespace'))
entry = match.group('entry')
entry = entry.replace("_", " ")
while entry:
# try the first four characters
attrtext = convert(entry[:SHORT_ATTR])
if attrtext:
elen = SHORT_ATTR
entry = entry[SHORT_ATTR:].strip()
else: # try the whole thing
attrtext = convert(entry.strip())
assert attrtext, "Invalid palette entry: %r" % entry
elen = len(entry)
entry = ""
attr, text = attrtext
out.append((attr, text.ljust(elen)))
return out | def parse_chart(chart, convert) | Convert string chart into text markup with the correct attributes.
chart -- palette chart as a string
convert -- function that converts a single palette entry to an
(attr, text) tuple, or None if no match is found | 4.313708 | 3.698584 | 1.166313 |
def convert_foreground(entry):
try:
attr = urwid.AttrSpec(entry, background, colors)
except urwid.AttrSpecError:
return None
return attr, entry
return parse_chart(chart, convert_foreground) | def foreground_chart(chart, background, colors) | Create text markup for a foreground colour chart
chart -- palette chart as string
background -- colour to use for background of chart
colors -- number of colors (88 or 256) | 7.039362 | 7.891667 | 0.891999 |
def convert_background(entry):
try:
attr = urwid.AttrSpec(foreground, entry, colors)
except urwid.AttrSpecError:
return None
# fix 8 <= colour < 16
if colors > 16 and attr.background_basic and \
attr.background_number >= 8:
# use high-colour with same number
entry = 'h%d'%attr.background_number
attr = urwid.AttrSpec(foreground, entry, colors)
return attr, entry
return parse_chart(chart, convert_background) | def background_chart(chart, foreground, colors) | Create text markup for a background colour chart
chart -- palette chart as string
foreground -- colour to use for foreground of chart
colors -- number of colors (88 or 256)
This will remap 8 <= colour < 16 to high-colour versions
in the hopes of greater compatibility | 7.787958 | 6.389326 | 1.218901 |
part_w = None
width = None
tag_widgets = []
cols = []
width = -1
# create individual TagWidgets and sort them
tag_widgets = [TagWidget(t, attr_normal, attr_focus) for t in tags]
tag_widgets = sorted(tag_widgets)
for tag_widget in tag_widgets:
if not tag_widget.hidden:
wrapped_tagwidget = tag_widget
tag_width = tag_widget.width()
cols.append(('fixed', tag_width, wrapped_tagwidget))
width += tag_width + 1
if cols:
part_w = urwid.Columns(cols, dividechars=1)
return width, part_w | def build_tags_part(tags, attr_normal, attr_focus) | create an urwid.Columns widget (wrapped in approproate Attributes)
to display a list of tag strings, as part of a threadline.
:param tags: list of tag strings to include
:type tags: list of str
:param attr_normal: urwid attribute to use if unfocussed
:param attr_focus: urwid attribute to use if focussed
:return: overall width in characters and a Columns widget.
:rtype: tuple[int, urwid.Columns] | 4.205424 | 3.623997 | 1.160438 |
part_w = None
width = None
# extract min and max allowed width from theme
minw = 0
maxw = None
width_tuple = struct['width']
if width_tuple is not None:
if width_tuple[0] == 'fit':
minw, maxw = width_tuple[1:]
content = prepare_string(name, thread, maxw)
# pad content if not long enough
if minw:
alignment = struct['alignment']
if alignment == 'left':
content = content.ljust(minw)
elif alignment == 'center':
content = content.center(minw)
else:
content = content.rjust(minw)
# define width and part_w
text = urwid.Text(content, wrap='clip')
width = text.pack()[0]
part_w = AttrFlipWidget(text, struct)
return width, part_w | def build_text_part(name, thread, struct) | create an urwid.Text widget (wrapped in approproate Attributes)
to display a plain text parts in a threadline.
create an urwid.Columns widget (wrapped in approproate Attributes)
to display a list of tag strings, as part of a threadline.
:param name: id of part to build
:type name: str
:param thread: the thread to get local info for
:type thread: :class:`alot.db.thread.Thread`
:param struct: theming attributes for this part, as provided by
:class:`alot.settings.theme.Theme.get_threadline_theming`
:type struct: dict
:return: overall width (in characters) and a widget.
:rtype: tuple[int, AttrFliwWidget] | 4.383315 | 3.764504 | 1.164381 |
# map part names to function extracting content string and custom shortener
prep = {
'mailcount': (prepare_mailcount_string, None),
'date': (prepare_date_string, None),
'authors': (prepare_authors_string, shorten_author_string),
'subject': (prepare_subject_string, None),
'content': (prepare_content_string, None),
}
s = ' ' # fallback value
if thread:
# get extractor and shortener
content, shortener = prep[partname]
# get string
s = content(thread)
# sanitize
s = s.replace('\n', ' ')
s = s.replace('\r', '')
# shorten if max width is requested
if maxw:
if len(s) > maxw and shortener:
s = shortener(s, maxw)
else:
s = s[:maxw]
return s | def prepare_string(partname, thread, maxw) | extract a content string for part 'partname' from 'thread' of maximal
length 'maxw'. | 4.196252 | 4.030376 | 1.041156 |
self.read_notmuch_config(self._notmuchconfig.filename)
self.read_config(self._config.filename) | def reload(self) | Reload notmuch and alot config files | 7.977181 | 4.697017 | 1.698351 |
spec = os.path.join(DEFAULTSPATH, 'notmuch.rc.spec')
self._notmuchconfig = read_config(path, spec) | def read_notmuch_config(self, path) | parse notmuch's config file
:param path: path to notmuch's config file
:type path: str | 8.45445 | 8.851022 | 0.955195 |
spec = os.path.join(DEFAULTSPATH, 'alot.rc.spec')
newconfig = read_config(path, spec, report_extra=True, checks={
'mail_container': checks.mail_container,
'force_list': checks.force_list,
'align': checks.align_mode,
'attrtriple': checks.attr_triple,
'gpg_key_hint': checks.gpg_key})
self._config.merge(newconfig)
self._config.walk(self._expand_config_values)
hooks_path = os.path.expanduser(self._config.get('hooksfile'))
try:
spec = importlib.util.spec_from_file_location('hooks', hooks_path)
self.hooks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.hooks)
except:
logging.exception('unable to load hooks file:%s', hooks_path)
if 'bindings' in newconfig:
self._update_bindings(newconfig['bindings'])
tempdir = self._config.get('template_dir')
logging.debug('template directory: `%s`' % tempdir)
# themes
themestring = newconfig['theme']
themes_dir = self._config.get('themes_dir')
logging.debug('themes directory: `%s`' % themes_dir)
# if config contains theme string use that
data_dirs = [os.path.join(d, 'alot/themes') for d in DATA_DIRS]
if themestring:
# This is a python for/else loop
# https://docs.python.org/3/reference/compound_stmts.html#for
#
# tl/dr; If the loop loads a theme it breaks. If it doesn't break,
# then it raises a ConfigError.
for dir_ in itertools.chain([themes_dir], data_dirs):
theme_path = os.path.join(dir_, themestring)
if not os.path.exists(os.path.expanduser(theme_path)):
logging.warning('Theme `%s` does not exist.', theme_path)
else:
try:
self._theme = Theme(theme_path)
except ConfigError as e:
raise ConfigError('Theme file `%s` failed '
'validation:\n%s' % (theme_path, e))
else:
break
else:
raise ConfigError('Could not find theme {}, see log for more '
'information'.format(themestring))
# if still no theme is set, resort to default
if self._theme is None:
theme_path = os.path.join(DEFAULTSPATH, 'default.theme')
self._theme = Theme(theme_path)
self._accounts = self._parse_accounts(self._config)
self._accountmap = self._account_table(self._accounts) | def read_config(self, path) | parse alot's config file
:param path: path to alot's config file
:type path: str | 4.163117 | 4.109258 | 1.013107 |
def expand_environment_and_home(value):
xdg_vars = {'XDG_CONFIG_HOME': '~/.config',
'XDG_CACHE_HOME': '~/.cache'}
for xdg_name, fallback in xdg_vars.items():
if xdg_name in value:
xdg_value = get_xdg_env(xdg_name, fallback)
value = value.replace('$%s' % xdg_name, xdg_value)\
.replace('${%s}' % xdg_name, xdg_value)
return os.path.expanduser(os.path.expandvars(value))
value = section[key]
if isinstance(value, str):
section[key] = expand_environment_and_home(value)
elif isinstance(value, (list, tuple)):
new = list()
for item in value:
if isinstance(item, str):
new.append(expand_environment_and_home(item))
else:
new.append(item)
section[key] = new | def _expand_config_values(section, key) | Walker function for ConfigObj.walk
Applies expand_environment_and_home to all configuration values that
are strings (or strings that are elements of tuples/lists)
:param section: as passed by ConfigObj.walk
:param key: as passed by ConfigObj.walk | 2.286445 | 2.124644 | 1.076154 |
accounts = []
if 'accounts' in config:
for acc in config['accounts'].sections:
accsec = config['accounts'][acc]
args = dict(config['accounts'][acc].items())
# create abook for this account
abook = accsec['abook']
logging.debug('abook defined: %s', abook)
if abook['type'] == 'shellcommand':
cmd = abook['command']
regexp = abook['regexp']
if cmd is not None and regexp is not None:
ef = abook['shellcommand_external_filtering']
args['abook'] = ExternalAddressbook(
cmd, regexp, external_filtering=ef)
else:
msg = 'underspecified abook of type \'shellcommand\':'
msg += '\ncommand: %s\nregexp:%s' % (cmd, regexp)
raise ConfigError(msg)
elif abook['type'] == 'abook':
contacts_path = abook['abook_contacts_file']
args['abook'] = AbookAddressBook(
contacts_path, ignorecase=abook['ignorecase'])
else:
del args['abook']
cmd = args['sendmail_command']
del args['sendmail_command']
newacc = SendmailAccount(cmd, **args)
accounts.append(newacc)
return accounts | def _parse_accounts(config) | read accounts information from config
:param config: valit alot config
:type config: `configobj.ConfigObj`
:returns: list of accounts | 4.042474 | 4.068274 | 0.993658 |
accountmap = {}
for acc in accounts:
accountmap[acc.address] = acc
for alias in acc.aliases:
accountmap[alias] = acc
return accountmap | def _account_table(accounts) | creates a lookup table (emailaddress -> account) for a given list of
accounts
:param accounts: list of accounts
:type accounts: list of `alot.account.Account`
:returns: hashtable
:rvalue: dict (str -> `alot.account.Account`) | 3.622545 | 4.17797 | 0.867059 |
value = None
if key in self._config:
value = self._config[key]
if isinstance(value, Section):
value = None
if value is None:
value = fallback
return value | def get(self, key, fallback=None) | look up global config values from alot's config
:param key: key to look up
:type key: str
:param fallback: fallback returned if key is not present
:type fallback: str
:returns: config value with type as specified in the spec-file | 3.174122 | 3.786806 | 0.838205 |
value = None
if section in self._notmuchconfig:
if key in self._notmuchconfig[section]:
value = self._notmuchconfig[section][key]
if value is None:
value = fallback
return value | def get_notmuch_setting(self, section, key, fallback=None) | look up config values from notmuch's config
:param section: key is in
:type section: str
:param key: key to look up
:type key: str
:param fallback: fallback returned if key is not present
:type fallback: str
:returns: config value with type as specified in the spec-file | 2.10803 | 2.493502 | 0.845409 |
colours = int(self._config.get('colourmode'))
return self._theme.get_attribute(colours, mode, name, part) | def get_theming_attribute(self, mode, name, part=None) | looks up theming attribute
:param mode: ui-mode (e.g. `search`,`thread`...)
:type mode: str
:param name: identifier of the atttribute
:type name: str
:rtype: urwid.AttrSpec | 8.436316 | 10.413693 | 0.810118 |
colours = int(self._config.get('colourmode'))
return self._theme.get_threadline_theming(thread, colours) | def get_threadline_theming(self, thread) | looks up theming info a threadline displaying a given thread. This
wraps around :meth:`~alot.settings.theme.Theme.get_threadline_theming`,
filling in the current colour mode.
:param thread: thread to theme
:type thread: alot.db.thread.Thread | 7.579904 | 7.521651 | 1.007745 |
colourmode = int(self._config.get('colourmode'))
theme = self._theme
cfg = self._config
colours = [1, 16, 256]
def colourpick(triple):
if triple is None:
return None
return triple[colours.index(colourmode)]
# global default attributes for tagstrings.
# These could contain values '' and 'default' which we interpret as
# "use the values from the widget below"
default_normal = theme.get_attribute(colourmode, 'global', 'tag')
default_focus = theme.get_attribute(colourmode, 'global', 'tag_focus')
# local defaults for tagstring attributes. depend on next lower widget
fallback_normal = resolve_att(onebelow_normal, default_normal)
fallback_focus = resolve_att(onebelow_focus, default_focus)
for sec in cfg['tags'].sections:
if re.match('^{}$'.format(sec), tag):
normal = resolve_att(colourpick(cfg['tags'][sec]['normal']),
fallback_normal)
focus = resolve_att(colourpick(cfg['tags'][sec]['focus']),
fallback_focus)
translated = cfg['tags'][sec]['translated']
translated = string_decode(translated, 'UTF-8')
if translated is None:
translated = tag
translation = cfg['tags'][sec]['translation']
if translation:
translated = re.sub(translation[0], translation[1], tag)
break
else:
normal = fallback_normal
focus = fallback_focus
translated = tag
return {'normal': normal, 'focussed': focus, 'translated': translated} | def get_tagstring_representation(self, tag, onebelow_normal=None,
onebelow_focus=None) | looks up user's preferred way to represent a given tagstring.
:param tag: tagstring
:type tag: str
:param onebelow_normal: attribute that shines through if unfocussed
:type onebelow_normal: urwid.AttrSpec
:param onebelow_focus: attribute that shines through if focussed
:type onebelow_focus: urwid.AttrSpec
If `onebelow_normal` or `onebelow_focus` is given these attributes will
be used as fallbacks for fg/bg values '' and 'default'.
This returns a dictionary mapping
:normal: to :class:`urwid.AttrSpec` used if unfocussed
:focussed: to :class:`urwid.AttrSpec` used if focussed
:translated: to an alternative string representation | 4.859641 | 4.455039 | 1.090819 |
globalmaps, modemaps = {}, {}
bindings = self._bindings
# get bindings for mode `mode`
# retain empty assignations to silence corresponding global mappings
if mode in bindings.sections:
for key in bindings[mode].scalars:
value = bindings[mode][key]
if isinstance(value, list):
value = ','.join(value)
modemaps[key] = value
# get global bindings
# ignore the ones already mapped in mode bindings
for key in bindings.scalars:
if key not in modemaps:
value = bindings[key]
if isinstance(value, list):
value = ','.join(value)
if value and value != '':
globalmaps[key] = value
# get rid of empty commands left in mode bindings
for k, v in list(modemaps.items()):
if not v:
del modemaps[k]
return globalmaps, modemaps | def get_keybindings(self, mode) | look up keybindings from `MODE-maps` sections
:param mode: mode identifier
:type mode: str
:returns: dictionaries of key-cmd for global and specific mode
:rtype: 2-tuple of dicts | 4.789724 | 4.695203 | 1.020131 |
cmdline = None
bindings = self._bindings
if key in bindings.scalars:
cmdline = bindings[key]
if mode in bindings.sections:
if key in bindings[mode].scalars:
value = bindings[mode][key]
if value:
cmdline = value
else:
# to be sure it isn't mapped globally
cmdline = None
# Workaround for ConfigObj misbehaviour. cf issue #500
# this ensures that we get at least strings only as commandlines
if isinstance(cmdline, list):
cmdline = ','.join(cmdline)
return cmdline | def get_keybinding(self, mode, key) | look up keybinding from `MODE-maps` sections
:param mode: mode identifier
:type mode: str
:param key: urwid-style key identifier
:type key: str
:returns: a command line to be applied upon keypress
:rtype: str | 7.270911 | 7.200063 | 1.00984 |
_, address = email.utils.parseaddr(address)
for account in self.get_accounts():
if account.matches_address(address):
return account
if return_default:
try:
return self.get_accounts()[0]
except IndexError:
# Fall through
pass
raise NoMatchingAccount | def account_matching_address(self, address, return_default=False) | returns :class:`Account` for a given email address (str)
:param str address: address to look up. A realname part will be ignored.
:param bool return_default: If True and no address can be found, then
the default account wil be returned.
:rtype: :class:`Account`
:raises ~alot.settings.errors.NoMatchingAccount: If no account can be
found. This includes if return_default is True and there are no
accounts defined. | 3.461823 | 3.006909 | 1.15129 |
order = order or []
abooks = []
for a in order:
if a:
if a.abook:
abooks.append(a.abook)
if append_remaining:
for a in self._accounts:
if a.abook and a.abook not in abooks:
abooks.append(a.abook)
return abooks | def get_addressbooks(self, order=None, append_remaining=True) | returns list of all defined :class:`AddressBook` objects | 2.573913 | 2.48063 | 1.037605 |
return mailcap.findmatch(self._mailcaps, *args, **kwargs) | def mailcap_find_match(self, *args, **kwargs) | Propagates :func:`mailcap.find_match` but caches the mailcap (first
argument) | 6.928696 | 7.862615 | 0.88122 |
fixed_format = self.get('timestamp_format')
if fixed_format:
rep = string_decode(d.strftime(fixed_format), 'UTF-8')
else:
format_hook = self.get_hook('timestamp_format')
if format_hook:
rep = string_decode(format_hook(d), 'UTF-8')
else:
rep = pretty_datetime(d)
return rep | def represent_datetime(self, d) | turns a given datetime obj into a string representation.
This will:
1) look if a fixed 'timestamp_format' is given in the config
2) check if a 'timestamp_format' hook is defined
3) use :func:`~alot.helper.pretty_datetime` as fallback | 3.642498 | 2.536133 | 1.436241 |
linewidget, _ = self.bufferlist.get_focus()
bufferlinewidget = linewidget.get_focus().original_widget
return bufferlinewidget.get_buffer() | def get_selected_buffer(self) | returns currently selected :class:`Buffer` element from list | 8.143363 | 7.25308 | 1.122745 |
return (
self._computed and
self.solver.computed and
(self.kernel is None or not self.kernel.dirty)
) | def computed(self) | Has the processes been computed since the last update of the kernel? | 11.034458 | 6.58059 | 1.676819 |
t = np.atleast_1d(t)
# Deal with one-dimensional data.
if len(t.shape) == 1:
t = np.atleast_2d(t).T
# Double check the dimensions against the kernel.
if len(t.shape) != 2 or (self.kernel is not None and
t.shape[1] != self.kernel.ndim):
raise ValueError("Dimension mismatch")
return t | def parse_samples(self, t) | Parse a list of samples to make sure that it has the correct
dimensions.
:param t: ``(nsamples,)`` or ``(nsamples, ndim)``
The list of samples. If 1-D, this is assumed to be a list of
one-dimensional samples otherwise, the size of the second
dimension is assumed to be the dimension of the input space.
Raises:
ValueError: If the input dimension doesn't match the dimension of
the kernel. | 3.378396 | 3.386538 | 0.997596 |
self.recompute(quiet=False)
r = np.array(y, dtype=np.float64, order="F")
r = self._check_dimensions(r, check_dim=False)
# Broadcast the mean function
m = [slice(None)] + [np.newaxis for _ in range(len(r.shape) - 1)]
r -= self._call_mean(self._x)[m]
# Do the solve
if len(r.shape) == 1:
b = self.solver.apply_inverse(r, in_place=True).flatten()
else:
b = self.solver.apply_inverse(r, in_place=True)
return b | def apply_inverse(self, y) | Self-consistently apply the inverse of the computed kernel matrix to
some vector or matrix of samples. This method subtracts the mean,
sorts the samples, then returns the samples in the correct (unsorted)
order.
:param y: ``(nsamples, )`` or ``(nsamples, K)``
The vector (or matrix) of sample values. | 4.4709 | 4.863789 | 0.919222 |
# Parse the input coordinates and ensure the right memory layout.
self._x = self.parse_samples(x)
self._x = np.ascontiguousarray(self._x, dtype=np.float64)
try:
self._yerr2 = float(yerr)**2 * np.ones(len(x))
except TypeError:
self._yerr2 = self._check_dimensions(yerr) ** 2
self._yerr2 = np.ascontiguousarray(self._yerr2, dtype=np.float64)
# Set up and pre-compute the solver.
self.solver = self.solver_type(self.kernel, **(self.solver_kwargs))
# Include the white noise term.
yerr = np.sqrt(self._yerr2 + np.exp(self._call_white_noise(self._x)))
self.solver.compute(self._x, yerr, **kwargs)
self._const = -0.5 * (len(self._x) * np.log(2 * np.pi) +
self.solver.log_determinant)
self.computed = True
self._alpha = None | def compute(self, x, yerr=0.0, **kwargs) | Pre-compute the covariance matrix and factorize it for a set of times
and uncertainties.
:param x: ``(nsamples,)`` or ``(nsamples, ndim)``
The independent coordinates of the data points.
:param yerr: (optional) ``(nsamples,)`` or scalar
The Gaussian uncertainties on the data points at coordinates
``x``. These values will be added in quadrature to the diagonal of
the covariance matrix. | 3.745652 | 3.591693 | 1.042865 |
if not self.computed:
if not (hasattr(self, "_x") and hasattr(self, "_yerr2")):
raise RuntimeError("You need to compute the model first")
try:
# Update the model making sure that we store the original
# ordering of the points.
self.compute(self._x, np.sqrt(self._yerr2), **kwargs)
except (ValueError, LinAlgError):
if quiet:
return False
raise
return True | def recompute(self, quiet=False, **kwargs) | Re-compute a previously computed model. You might want to do this if
the kernel parameters change and the kernel is labeled as ``dirty``.
:param quiet: (optional)
If ``True``, return false when the computation fails. Otherwise,
throw an error if something goes wrong. (default: ``False``) | 5.400838 | 4.85597 | 1.112206 |
if not self.recompute(quiet=quiet):
return -np.inf
try:
mu = self._call_mean(self._x)
except ValueError:
if quiet:
return -np.inf
raise
r = np.ascontiguousarray(self._check_dimensions(y) - mu,
dtype=np.float64)
ll = self._const - 0.5 * self.solver.dot_solve(r)
return ll if np.isfinite(ll) else -np.inf | def log_likelihood(self, y, quiet=False) | Compute the logarithm of the marginalized likelihood of a set of
observations under the Gaussian process model. You must call
:func:`GP.compute` before this function.
:param y: ``(nsamples, )``
The observations at the coordinates provided in the ``compute``
step.
:param quiet:
If ``True`` return negative infinity instead of raising an
exception when there is an invalid kernel or linear algebra
failure. (default: ``False``) | 4.643732 | 4.606586 | 1.008063 |
# Make sure that the model is computed and try to recompute it if it's
# dirty.
if not self.recompute(quiet=quiet):
return np.zeros(len(self), dtype=np.float64)
# Pre-compute some factors.
try:
alpha = self._compute_alpha(y, False)
except ValueError:
if quiet:
return np.zeros(len(self), dtype=np.float64)
raise
if len(self.white_noise) or len(self.kernel):
K_inv = self.solver.get_inverse()
A = np.einsum("i,j", alpha, alpha) - K_inv
# Compute each component of the gradient.
grad = np.empty(len(self))
n = 0
l = len(self.mean)
if l:
try:
mu = self._call_mean_gradient(self._x)
except ValueError:
if quiet:
return np.zeros(len(self), dtype=np.float64)
raise
grad[n:n+l] = np.dot(mu, alpha)
n += l
l = len(self.white_noise)
if l:
wn = self._call_white_noise(self._x)
wng = self._call_white_noise_gradient(self._x)
grad[n:n+l] = 0.5 * np.sum((np.exp(wn)*np.diag(A))[None, :]*wng,
axis=1)
n += l
l = len(self.kernel)
if l:
Kg = self.kernel.get_gradient(self._x)
grad[n:n+l] = 0.5 * np.einsum("ijk,ij", Kg, A)
return grad | def grad_log_likelihood(self, y, quiet=False) | Compute the gradient of :func:`GP.log_likelihood` as a function of the
parameters returned by :func:`GP.get_parameter_vector`. You must call
:func:`GP.compute` before this function.
:param y: ``(nsamples,)``
The list of observations at coordinates ``x`` provided to the
:func:`compute` function.
:param quiet:
If ``True`` return a gradient of zero instead of raising an
exception when there is an invalid kernel or linear algebra
failure. (default: ``False``) | 3.089811 | 3.072931 | 1.005493 |
self.recompute()
alpha = self._compute_alpha(y, cache)
xs = self.parse_samples(t)
if kernel is None:
kernel = self.kernel
# Compute the predictive mean.
Kxs = kernel.get_value(xs, self._x)
mu = np.dot(Kxs, alpha) + self._call_mean(xs)
if not (return_var or return_cov):
return mu
KinvKxs = self.solver.apply_inverse(Kxs.T)
if return_var:
var = kernel.get_value(xs, diag=True)
var -= np.sum(Kxs.T*KinvKxs, axis=0)
return mu, var
cov = kernel.get_value(xs)
cov -= np.dot(Kxs, KinvKxs)
return mu, cov | def predict(self, y, t,
return_cov=True,
return_var=False,
cache=True,
kernel=None) | Compute the conditional predictive distribution of the model. You must
call :func:`GP.compute` before this function.
:param y: ``(nsamples,)``
The observations to condition the model on.
:param t: ``(ntest,)`` or ``(ntest, ndim)``
The coordinates where the predictive distribution should be
computed.
:param return_cov: (optional)
If ``True``, the full covariance matrix is computed and returned.
Otherwise, only the mean prediction is computed. (default:
``True``)
:param return_var: (optional)
If ``True``, only return the diagonal of the predictive covariance;
this will be faster to compute than the full covariance matrix.
This overrides ``return_cov`` so, if both are set to ``True``,
only the diagonal is computed. (default: ``False``)
:param cache: (optional)
If ``True`` the value of alpha will be cached to speed up repeated
predictions.
:param kernel: (optional)
If provided, this kernel will be used to calculate the cross terms.
This can be used to separate the predictions from different
kernels.
Returns ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values
of ``return_cov`` and ``return_var``. These output values are:
* **mu** ``(ntest,)``: mean of the predictive distribution,
* **cov** ``(ntest, ntest)``: the predictive covariance matrix, and
* **var** ``(ntest,)``: the diagonal elements of ``cov``. | 3.443091 | 3.889976 | 0.885119 |
mu, cov = self.predict(y, t)
return multivariate_gaussian_samples(cov, size, mean=mu) | def sample_conditional(self, y, t, size=1) | Draw samples from the predictive conditional distribution. You must
call :func:`GP.compute` before this function.
:param y: ``(nsamples, )``
The observations to condition the model on.
:param t: ``(ntest, )`` or ``(ntest, ndim)``
The coordinates where the predictive distribution should be
computed.
:param size: (optional)
The number of samples to draw. (default: ``1``)
Returns **samples** ``(size, ntest)``, a list of predictions at
coordinates given by ``t``. | 6.50151 | 8.485992 | 0.766146 |
if t is None:
self.recompute()
n, _ = self._x.shape
# Generate samples using the precomputed factorization.
results = self.solver.apply_sqrt(np.random.randn(size, n))
results += self._call_mean(self._x)
return results[0] if size == 1 else results
x = self.parse_samples(t)
cov = self.get_matrix(x)
cov[np.diag_indices_from(cov)] += TINY
return multivariate_gaussian_samples(cov, size,
mean=self._call_mean(x)) | def sample(self, t=None, size=1) | Draw samples from the prior distribution.
:param t: ``(ntest, )`` or ``(ntest, ndim)`` (optional)
The coordinates where the model should be sampled. If no
coordinates are given, the precomputed coordinates and
factorization are used.
:param size: (optional)
The number of samples to draw. (default: ``1``)
Returns **samples** ``(size, ntest)``, a list of predictions at
coordinates given by ``t``. If ``size == 1``, the result is a single
sample with shape ``(ntest,)``. | 5.559806 | 5.380837 | 1.03326 |
x1 = self.parse_samples(x1)
if x2 is None:
return self.kernel.get_value(x1)
x2 = self.parse_samples(x2)
return self.kernel.get_value(x1, x2) | def get_matrix(self, x1, x2=None) | Get the covariance matrix at a given set or two of independent
coordinates.
:param x1: ``(nsamples,)`` or ``(nsamples, ndim)``
A list of samples.
:param x2: ``(nsamples,)`` or ``(nsamples, ndim)`` (optional)
A second list of samples. If this is given, the cross covariance
matrix is computed. Otherwise, the auto-covariance is evaluated. | 2.420624 | 3.033051 | 0.798082 |
with tempfile.NamedTemporaryFile("w", suffix=".cpp") as f:
f.write("int main (int argc, char **argv) { return 0; }")
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True | def has_flag(compiler, flagname) | Return a boolean indicating whether a flag name is supported on
the specified compiler. | 1.791173 | 1.947381 | 0.919785 |
with tempfile.NamedTemporaryFile("w", suffix=".cpp") as srcfile:
srcfile.write("int main (int argc, char **argv) { return 0; }")
srcfile.flush()
outfn = srcfile.name + ".so"
try:
compiler.link_executable(
[srcfile.name],
outfn,
libraries=[libname],
)
except setuptools.distutils.errors.LinkError:
return False
if not os.path.exists(outfn):
return False
os.remove(outfn)
return True | def has_library(compiler, libname) | Return a boolean indicating whether a library is found. | 2.306584 | 2.273181 | 1.014694 |
# Compute the kernel matrix.
K = self.kernel.get_value(x)
K[np.diag_indices_from(K)] += yerr ** 2
# Factor the matrix and compute the log-determinant.
self._factor = (cholesky(K, overwrite_a=True, lower=False), False)
self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0])))
self.computed = True | def compute(self, x, yerr) | Compute and factorize the covariance matrix.
Args:
x (ndarray[nsamples, ndim]): The independent coordinates of the
data points.
yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
the data points at coordinates ``x``. These values will be
added in quadrature to the diagonal of the covariance matrix. | 3.203414 | 3.372699 | 0.949807 |
r
return cho_solve(self._factor, y, overwrite_b=in_place) | def apply_inverse(self, y, in_place=False) | r"""
Apply the inverse of the covariance matrix to the input by solving
.. math::
K\,x = y
Args:
y (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or
matrix :math:`y`.
in_place (Optional[bool]): Should the data in ``y`` be overwritten
with the result :math:`x`? (default: ``False``) | 13.008392 | 16.043098 | 0.81084 |
r
return np.dot(y.T, cho_solve(self._factor, y)) | def dot_solve(self, y) | r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`. | 14.969055 | 14.336878 | 1.044094 |
return self.apply_inverse(np.eye(len(self._factor[0])), in_place=True) | def get_inverse(self) | Get the dense inverse covariance matrix. This is used for computing
gradients, but it is not recommended in general. | 12.19793 | 11.304406 | 1.079042 |
return np.array([getattr(self, k) for k in self.parameter_names]) | def parameter_vector(self) | An array of all parameters (including frozen parameters) | 4.690699 | 4.278557 | 1.096327 |
return OrderedDict(zip(
self.get_parameter_names(include_frozen=include_frozen),
self.get_parameter_vector(include_frozen=include_frozen),
)) | def get_parameter_dict(self, include_frozen=False) | Get an ordered dictionary of the parameters
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``) | 2.979722 | 3.222576 | 0.92464 |
if include_frozen:
return self.parameter_names
return tuple(p
for p, f in zip(self.parameter_names, self.unfrozen_mask)
if f) | def get_parameter_names(self, include_frozen=False) | Get a list of the parameter names
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``) | 4.755325 | 5.501546 | 0.864362 |
if include_frozen:
return self.parameter_bounds
return list(p
for p, f in zip(self.parameter_bounds, self.unfrozen_mask)
if f) | def get_parameter_bounds(self, include_frozen=False) | Get a list of the parameter bounds
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``) | 5.56426 | 5.97598 | 0.931104 |
if include_frozen:
return self.parameter_vector
return self.parameter_vector[self.unfrozen_mask] | def get_parameter_vector(self, include_frozen=False) | Get an array of the parameter values in the correct order
Args:
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``) | 3.815786 | 4.789773 | 0.796653 |
v = self.parameter_vector
if include_frozen:
v[:] = vector
else:
v[self.unfrozen_mask] = vector
self.parameter_vector = v
self.dirty = True | def set_parameter_vector(self, vector, include_frozen=False) | Set the parameter values to the given vector
Args:
vector (array[vector_size] or array[full_size]): The target
parameter vector. This must be in the same order as
``parameter_names`` and it should only include frozen
parameters if ``include_frozen`` is ``True``.
include_frozen (Optional[bool]): Should the frozen parameters be
included in the returned value? (default: ``False``) | 3.439492 | 4.780194 | 0.71953 |
i = self.get_parameter_names(include_frozen=True).index(name)
self.unfrozen_mask[i] = False | def freeze_parameter(self, name) | Freeze a parameter by name
Args:
name: The name of the parameter | 6.817584 | 8.643686 | 0.788736 |
i = self.get_parameter_names(include_frozen=True).index(name)
self.unfrozen_mask[i] = True | def thaw_parameter(self, name) | Thaw a parameter by name
Args:
name: The name of the parameter | 6.856505 | 10.166674 | 0.67441 |
i = self.get_parameter_names(include_frozen=True).index(name)
return self.get_parameter_vector(include_frozen=True)[i] | def get_parameter(self, name) | Get a parameter value by name
Args:
name: The name of the parameter | 4.823179 | 6.742128 | 0.715379 |
i = self.get_parameter_names(include_frozen=True).index(name)
v = self.get_parameter_vector(include_frozen=True)
v[i] = value
self.set_parameter_vector(v, include_frozen=True) | def set_parameter(self, name, value) | Set a parameter value by name
Args:
name: The name of the parameter
value (float): The new value for the parameter | 2.913828 | 3.307677 | 0.880929 |
for p, b in zip(self.parameter_vector, self.parameter_bounds):
if b[0] is not None and p < b[0]:
return -np.inf
if b[1] is not None and p > b[1]:
return -np.inf
return 0.0 | def log_prior(self) | Compute the log prior probability of the current parameters | 2.252592 | 2.094183 | 1.075642 |
if mean is None:
mean = np.zeros(len(matrix))
samples = np.random.multivariate_normal(mean, matrix, N)
if N == 1:
return samples[0]
return samples | def multivariate_gaussian_samples(matrix, N, mean=None) | Generate samples from a multidimensional Gaussian with a given covariance.
:param matrix: ``(k, k)``
The covariance matrix.
:param N:
The number of samples to generate.
:param mean: ``(k,)`` (optional)
The mean of the Gaussian. Assumed to be zero if not given.
:returns samples: ``(k,)`` or ``(N, k)``
Samples from the given multivariate normal. | 1.959341 | 2.504685 | 0.78227 |
# Check the shape of the sample list.
assert len(samples.shape) == 2
# Build a KD-tree on the samples.
tree = cKDTree(samples)
# Compute the distances.
d, i = tree.query(samples[0], k=len(samples))
return i | def nd_sort_samples(samples) | Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version. | 4.104662 | 4.071857 | 1.008056 |
"Look-up and return a pretty-printer that can print va."
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename == None:
return None
for function in pretty_printers_dict:
if function.search(typename):
return pretty_printers_dict[function](val)
return None | def lookup_function(val) | Look-up and return a pretty-printer that can print va. | 4.018392 | 2.680296 | 1.499234 |
# Follow the same logic Django uses for determining access to the
# static-serving view.
if not django_settings.DEBUG and not insecure:
raise ImproperlyConfigured("The staticfiles view can only be used in "
"debug mode or if the --insecure "
"option of 'runserver' is used")
if not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED:
# Collect only the requested file, in order to serve the result as
# fast as possible. This won't interfere with the template tags in any
# way, as those will still cause Django to collect all media.
default_collector.collect(request, files=[path])
return serve(request, path, document_root=django_settings.STATIC_ROOT,
**kwargs) | def serve_static(request, path, insecure=False, **kwargs) | Collect and serve static files.
This view serves up static files, much like Django's
:py:func:`~django.views.static.serve` view, with the addition that it
collects static files first (if enabled). This allows images, fonts, and
other assets to be served up without first loading a page using the
``{% javascript %}`` or ``{% stylesheet %}`` template tags.
You can use this view by adding the following to any :file:`urls.py`::
urlpatterns += static('static/', view='pipeline.views.serve_static') | 8.175333 | 8.625634 | 0.947795 |
js = self.concatenate(paths)
if templates:
js = js + self.compile_templates(templates)
if not settings.DISABLE_WRAPPER:
js = settings.JS_WRAPPER % js
compressor = self.js_compressor
if compressor:
js = getattr(compressor(verbose=self.verbose), 'compress_js')(js)
return js | def compress_js(self, paths, templates=None, **kwargs) | Concatenate and compress JS files | 4.607435 | 4.562872 | 1.009766 |
css = self.concatenate_and_rewrite(paths, output_filename, variant)
compressor = self.css_compressor
if compressor:
css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
if not variant:
return css
elif variant == "datauri":
return self.with_data_uri(css)
else:
raise CompressorError("\"%s\" is not a valid variant" % variant) | def compress_css(self, paths, output_filename, variant=None, **kwargs) | Concatenate and compress CSS files | 4.291738 | 4.368865 | 0.982346 |
if not base:
path = os.path.basename(path)
if path == base:
base = os.path.dirname(path)
name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
re.escape(base), re.escape(settings.TEMPLATE_EXT)
), r"\1", path)
return re.sub(r"[\/\\]", settings.TEMPLATE_SEPARATOR, name) | def template_name(self, path, base) | Find out the name of a JS template | 3.285801 | 3.226759 | 1.018297 |
stylesheets = []
for path in paths:
def reconstruct(match):
quote = match.group(1) or ''
asset_path = match.group(2)
if NON_REWRITABLE_URL.match(asset_path):
return "url(%s%s%s)" % (quote, asset_path, quote)
asset_url = self.construct_asset_path(asset_path, path,
output_filename, variant)
return "url(%s)" % asset_url
content = self.read_text(path)
# content needs to be unicode to avoid explosions with non-ascii chars
content = re.sub(URL_DETECTOR, reconstruct, content)
stylesheets.append(content)
return '\n'.join(stylesheets) | def concatenate_and_rewrite(self, paths, output_filename, variant=None) | Concatenate together files and rewrite urls | 3.862891 | 3.822922 | 1.010455 |
public_path = self.absolute_path(asset_path, os.path.dirname(css_path).replace('\\', '/'))
if self.embeddable(public_path, variant):
return "__EMBED__%s" % public_path
if not posixpath.isabs(asset_path):
asset_path = self.relative_path(public_path, output_filename)
return asset_path | def construct_asset_path(self, asset_path, css_path, output_filename, variant=None) | Return a rewritten asset URL for a stylesheet | 3.911322 | 4.057318 | 0.964017 |
name, ext = os.path.splitext(path)
font = ext in FONT_EXTS
if not variant:
return False
if not (re.search(settings.EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
return False
if ext not in EMBED_EXTS:
return False
if not (font or len(self.encoded_content(path)) < settings.EMBED_MAX_IMAGE_SIZE):
return False
return True | def embeddable(self, path, variant) | Is the asset embeddable ? | 4.948055 | 4.822745 | 1.025983 |
if path in self.__class__.asset_contents:
return self.__class__.asset_contents[path]
data = self.read_bytes(path)
self.__class__.asset_contents[path] = force_text(base64.b64encode(data))
return self.__class__.asset_contents[path] | def encoded_content(self, path) | Return the base64 encoded contents | 2.761274 | 2.692564 | 1.025518 |
name, ext = os.path.splitext(path)
return MIME_TYPES[ext] | def mime_type(self, path) | Get mime-type from filename | 4.982457 | 4.393506 | 1.13405 |
if posixpath.isabs(path):
path = posixpath.join(staticfiles_storage.location, path)
else:
path = posixpath.join(start, path)
return posixpath.normpath(path) | def absolute_path(self, path, start) | Return the absolute public path for an asset,
given the path of the stylesheet that contains it. | 3.0104 | 3.295701 | 0.913432 |
absolute_path = posixpath.join(settings.PIPELINE_ROOT, absolute_path)
output_path = posixpath.join(settings.PIPELINE_ROOT, posixpath.dirname(output_filename))
return relpath(absolute_path, output_path) | def relative_path(self, absolute_path, output_filename) | Rewrite paths relative to the output stylesheet path | 3.086942 | 3.011465 | 1.025063 |
file = staticfiles_storage.open(path)
content = file.read()
file.close()
return content | def read_bytes(self, path) | Read file content in binary mode | 4.041928 | 4.273164 | 0.945886 |
if not has_magic(pathname):
yield pathname
return
dirname, basename = os.path.split(pathname)
if not dirname:
for name in glob1(dirname, basename):
yield name
return
if has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
if has_magic(basename):
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name) | def iglob(pathname) | Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch. | 2.066145 | 2.223876 | 0.929074 |
if not path:
raise ValueError("no path specified")
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
# Work out how much of the filepath is shared by start and path.
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return posixpath.curdir
return posixpath.join(*rel_list) | def relpath(path, start=posixpath.curdir) | Return a relative version of a path | 1.283408 | 1.246608 | 1.029519 |
if not fcntl:
return
for f in (sys.__stdout__, sys.__stderr__):
fileno = f.fileno()
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) | def set_std_streams_blocking() | Set stdout and stderr to be blocking.
This is called after Popen.communicate() to revert stdout and stderr back
to be blocking (the default) in the event that the process to which they
were passed manipulated one or both file descriptors to be non-blocking. | 1.944934 | 2.167525 | 0.897307 |
argument_list = []
for flattening_arg in command:
if isinstance(flattening_arg, string_types):
argument_list.append(flattening_arg)
else:
argument_list.extend(flattening_arg)
# The first element in argument_list is the program that will be executed; if it is '', then
# a PermissionError will be raised. Thus empty arguments are filtered out from argument_list
argument_list = list(filter(None, argument_list))
stdout = None
try:
# We always catch stdout in a file, but we may not have a use for it.
temp_file_container = cwd or os.path.dirname(stdout_captured or "") or os.getcwd()
with NamedTemporaryFile(delete=False, dir=temp_file_container) as stdout:
compiling = subprocess.Popen(argument_list, cwd=cwd,
stdout=stdout,
stderr=subprocess.PIPE)
_, stderr = compiling.communicate()
set_std_streams_blocking()
if compiling.returncode != 0:
stdout_captured = None # Don't save erroneous result.
raise CompilerError(
"{0!r} exit code {1}\n{2}".format(argument_list, compiling.returncode, stderr),
command=argument_list,
error_output=stderr)
# User wants to see everything that happened.
if self.verbose:
with open(stdout.name) as out:
print(out.read())
print(stderr)
except OSError as e:
stdout_captured = None # Don't save erroneous result.
raise CompilerError(e, command=argument_list,
error_output=text_type(e))
finally:
# Decide what to do with captured stdout.
if stdout:
if stdout_captured:
shutil.move(stdout.name, os.path.join(cwd or os.curdir, stdout_captured))
else:
os.remove(stdout.name) | def execute_command(self, command, cwd=None, stdout_captured=None) | Execute a command at cwd, saving its normal output at
stdout_captured. Errors, defined as nonzero return code or a failure
to start execution, will raise a CompilerError exception with a
description of the cause. They do not write output.
This is file-system safe (any valid file names are allowed, even with
spaces or crazy characters) and OS agnostic (existing and future OSes
that Python supports should already work).
The only thing weird here is that any incoming command arg item may
itself be a tuple. This allows compiler implementations to look clean
while supporting historical string config settings and maintaining
backwards compatibility. Thus, we flatten one layer deep.
((env, foocomp), infile, (-arg,)) -> (env, foocomp, infile, -arg) | 3.89706 | 3.804395 | 1.024357 |
packager = Packager()
css_packages = getattr(cls, 'css_packages', {})
return dict(
(media_target,
cls._get_media_files(packager=packager,
media_packages=media_packages,
media_type='css',
extra_files=extra_files.get(media_target,
[])))
for media_target, media_packages in six.iteritems(css_packages)
) | def _get_css_files(cls, extra_files) | Return all CSS files from the Media class.
Args:
extra_files (dict):
The contents of the Media class's original :py:attr:`css`
attribute, if one was provided.
Returns:
dict:
The CSS media types and files to return for the :py:attr:`css`
attribute. | 3.941274 | 3.884407 | 1.01464 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.