index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
61,552 |
textile.core
|
fTextileList
| null |
def fTextileList(self, match):
    """Render one matched Textile list block (ul/ol/dl) as HTML.

    `match.group()` is a run of list lines (each starting with a mix of
    '*', '#', ';' or ':').  The method walks the lines, tracking open
    lists in `ls` (an ordered map of tag-prefix -> open-state) and the
    previous line's prefix in `pt`, opening/closing nested lists as the
    prefix depth changes.  Ordered-list numbering state is kept on
    `self.olstarts` across calls.
    """
    text = re.split(r'\n(?=[*#;:])', match.group(), flags=re.M)
    pt = ''
    result = []
    ls = OrderedDict()
    for i, line in enumerate(text):
        # Peek at the next line so we know whether to close lists.
        try:
            nextline = text[i + 1]
        except IndexError:
            nextline = ''

        m = re.search(r"^(?P<tl>[#*;:]+)(?P<st>_|\d+)?(?P<atts>{0})[ .]"
                      "(?P<content>.*)$".format(cls_re_s), line, re.S)
        if m:
            tl, start, atts, content = m.groups()
            content = content.strip()
        else:
            # Not a list line: pass it through untouched.
            result.append(line)
            continue

        nl = ''
        ltype = list_type(tl)
        tl_tags = {';': 'dt', ':': 'dd'}
        litem = tl_tags.get(tl[0], 'li')

        showitem = len(content) > 0

        # handle list continuation/start attribute on ordered lists
        if ltype == 'o':
            if not hasattr(self, 'olstarts'):
                self.olstarts = {tl: 1}

            # does the first line of this ol have a start attribute
            if len(tl) > len(pt):
                # no, set it to 1.
                if start is None:
                    self.olstarts[tl] = 1
                # yes, set it to the given number
                elif start != '_':
                    self.olstarts[tl] = int(start)
                # we won't need to handle the '_' case, we'll just
                # print out the number when it's needed

            # put together the start attribute if needed
            if len(tl) > len(pt) and start is not None:
                start = ' start="{0}"'.format(self.olstarts[tl])

            # This will only increment the count for list items, not
            # definition items
            if showitem:
                # Assume properly formatted input
                try:
                    self.olstarts[tl] = self.olstarts[tl] + 1
                # if we get here, we've got some poor textile formatting.
                # add this type of list to olstarts and assume we'll start
                # it at 1. expect screwy output.
                except KeyError:
                    self.olstarts[tl] = 1

        nm = re.match(r"^(?P<nextlistitem>[#\*;:]+)(_|[\d]+)?{0}"
                      r"[ .].*".format(cls_re_s), nextline)
        if nm:
            nl = nm.group('nextlistitem')

        # We need to handle nested definition lists differently. If
        # the next tag is a dt (';') of a lower nested level than the
        # current dd (':'),
        if ';' in pt and ':' in tl:
            ls[tl] = 2

        atts = pba(atts, restricted=self.restricted)
        tabs = '\t' * len(tl)
        # If start is still None, set it to '', else leave the value that
        # we've already formatted.
        start = start or ''

        # if this item tag isn't in the list, create a new list and
        # item, else just create the item
        if tl not in ls:
            ls[tl] = 1
            itemtag = ("\n{0}\t<{1}>{2}".format(tabs, litem, content) if
                       showitem else '')
            line = "<{0}l{1}{2}>{3}".format(ltype, atts, start, itemtag)
        else:
            line = ("\t<{0}{1}>{2}".format(litem, atts, content) if
                    showitem else '')
        line = '{0}{1}'.format(tabs, line)
        if len(nl) <= len(tl):
            if showitem:
                line = "{0}</{1}>".format(line, litem)
            # work backward through the list closing nested lists/items
            for k, v in reversed(list(ls.items())):
                if len(k) > len(nl):
                    if v != 2:
                        line = "{0}\n{1}</{2}l>".format(line, tabs,
                                                        list_type(k))
                    if len(k) > 1 and v != 2:
                        line = "{0}</{1}>".format(line, litem)
                    del ls[k]

        # Remember the current Textile tag:
        pt = tl
        # This else exists in the original php version. I'm not sure how
        # to come up with a case where the line would not match. I think
        # it may have been necessary due to the way php returns matches.
        # else:
            # line = "{0}\n".format(line)
        result.append(line)
    return self.doTagBr(litem, "\n".join(result))
|
(self, match)
|
61,553 |
textile.core
|
footnoteID
| null |
def footnoteID(self, m):
    """Build the in-text superscript link for one footnote reference.

    `m` carries groups 'id' (the footnote number), 'nolink' ('!' when the
    reference should not be a hyperlink) and 'space' (trailing whitespace
    to preserve).  Allocates a stable id for the footnote on first sight
    (stored in self.fn) so the footnote body can link back to it.

    Fix: the original assigned ``fnid = self.fn[m.group('id')]`` twice in
    a row; the redundant second assignment is removed.
    """
    fn_att = OrderedDict({'class': 'footnote'})
    if m.group('id') not in self.fn:
        self.fn[m.group('id')] = '{0}{1}'.format(self.linkPrefix,
                                                 self._increment_link_index())
    fnid = self.fn[m.group('id')]
    # Anchor the reference itself so the footnote can link back up to it.
    fn_att['id'] = 'fnrev{0}'.format(fnid)
    footref = generate_tag('a', m.group('id'), {'href': '#fn{0}'.format(
        fnid)})
    if '!' == m.group('nolink'):
        # '!' suppresses the hyperlink; show the bare number instead.
        footref = m.group('id')
    footref = generate_tag('sup', footref, fn_att)
    return '{0}{1}'.format(footref, m.group('space'))
|
(self, m)
|
61,554 |
textile.core
|
footnoteRef
| null |
def footnoteRef(self, text):
    """Rewrite footnote references like ``[1]`` via footnoteID."""
    # somehow php-textile gets away with not capturing the space.
    ref_re = re.compile(
        r'(?<=\S)\[(?P<id>{0}+)(?P<nolink>!?)\]'
        r'(?P<space>{1}?)'.format(regex_snippets['digit'],
                                  regex_snippets['space']),
        re.U)
    return ref_re.sub(self.footnoteID, text)
|
(self, text)
|
61,555 |
textile.core
|
getHTMLComments
|
Search the string for HTML comments, e.g. <!-- comment text -->. We
send the text that matches this to fParseHTMLComments.
|
def getHTMLComments(self, text):
    """Locate HTML comments (``<!-- comment text -->``) in the text and
    route each one through fParseHTMLComments via doSpecial."""
    handler = self.fParseHTMLComments
    return self.doSpecial(text, '<!--', '-->', handler)
|
(self, text)
|
61,556 |
textile.core
|
getRefs
|
Capture and store URL references in self.urlrefs.
|
def getRefs(self, text):
    """Capture URL reference definitions into self.urlrefs, removing
    them from the text via the self.refs substitution callback."""
    collect = self.refs
    return self.url_ref_regex.sub(collect, text)
|
(self, text)
|
61,557 |
textile.core
|
glyphs
|
Because of the split command, the regular expressions are different for
when the text at the beginning and the rest of the text.
for example:
let's say the raw text provided is "*Here*'s some textile"
before it gets to this glyphs method, the text has been converted to
"<strong>Here</strong>'s some textile"
When run through the split, we end up with ["<strong>", "Here",
"</strong>", "'s some textile"]. The re.search that follows tells it
not to ignore html tags.
If the single quote is the first character on the line, it's an open
single quote. If it's the first character of one of those splits, it's
an apostrophe or closed single quote, but the regex will bear that out.
A similar situation occurs for double quotes as well.
So, for the first pass, we use the glyph_search_initial set of
regexes. For all remaining passes, we use glyph_search
|
def glyphs(self, text):
    """Apply typographic glyph substitutions (curly quotes, dashes, etc.)
    to the text between HTML tags.

    The text is split on angle-bracketed tags; even-indexed fragments are
    plain text and get the substitutions, odd-indexed fragments are the
    tags themselves and pass through untouched.  The very first fragment
    uses glyph_search_initial (line-start quote handling differs there);
    every later fragment uses glyph_search.
    """
    text = text.rstrip('\n')
    tag_split = re.compile(r'(<[\w\/!?].*?>)', re.U)
    active_patterns = self.glyph_search_initial
    pieces = []
    for idx, fragment in enumerate(tag_split.split(text)):
        if idx % 2 == 0:
            # Plain-text fragment: run every search/replace pair over it.
            for pat, repl in zip(active_patterns, self.glyph_replace):
                fragment = pat.sub(repl, fragment)
        pieces.append(fragment)
        if idx == 0:
            # After the first fragment, switch to the non-initial regexes.
            active_patterns = self.glyph_search
    return ''.join(pieces)
|
(self, text)
|
61,558 |
textile.core
|
graf
| null |
def graf(self, text):
    """Run the paragraph-level substitution pipeline over `text`.

    In lite mode the block-level helpers (notextile/code shielding,
    tables, lists) are skipped; with noimage set, image markup is left
    alone.  The inline passes (spans, footnotes, notes, glyphs) always
    run, in that order.
    """
    if not self.lite:
        text = self.noTextile(text)
        text = self.code(text)

    text = self.getHTMLComments(text)
    text = self.getRefs(text)
    text = self.links(text)

    if not self.noimage:
        text = self.image(text)
    if not self.lite:
        text = self.table(text)
        text = self.redcloth_list(text)
        text = self.textileLists(text)

    # Inline passes, order matters: spans before footnote/note refs,
    # glyphs last so they see the final text.
    for transform in (self.span, self.footnoteRef, self.noteRef,
                      self.glyphs):
        text = transform(text)
    return text.rstrip('\n')
|
(self, text)
|
61,559 |
textile.core
|
image
| null |
def image(self, text):
    """Find Textile image markup (``!src(title)!:href``) and hand every
    match to fImage for rendering."""
    image_re = re.compile(r"""
        (?:[\[{{])?              # pre
        \!                       # opening !
        (\<|\=|\>)?              # optional alignment atts
        ({0})                    # optional style,class atts
        (?:\.\s)?                # optional dot-space
        ([^\s(!]+)               # presume this is the src
        \s?                      # optional space
        (?:\(([^\)]+)\))?        # optional title
        \!                       # closing
        (?::(\S+))?              # optional href
        (?:[\]}}]|(?=\s|$))      # lookahead: space or end of string
    """.format(cls_re_s), re.U | re.X)
    return image_re.sub(self.fImage, text)
|
(self, text)
|
61,560 |
textile.core
|
links
|
For some reason, the part of the regex below that matches the url
does not match a trailing parenthesis. It gets caught by tail, and
we check later to see if it should be included as part of the url.
|
def links(self, text):
    """Convert Textile link markup into HTML anchors.

    First pass marks the start of every well-formed link; the second
    pass replaces the marked spans with shelved link tokens.  (The url
    part of the regex doesn't grab a trailing parenthesis; replaceLinks
    sorts that out from the tail.)
    """
    marked = self.markStartOfLinks(text)
    return self.replaceLinks(marked)
|
(self, text)
|
61,561 |
textile.core
|
makeBackrefLink
|
Given the pieces of a back reference link, create an <a> tag.
|
def makeBackrefLink(self, info, g_links, i):
    """Given the pieces of a back reference link, create an <a> tag.

    `info` is a note record ('def' may hold a per-note link override;
    'refids' lists the reference anchors), `g_links` the global backlink
    mode, and `i` the display character.  A '!' mode yields no backlink,
    '^' a single backlink to the first reference; otherwise one
    superscript backlink per reference, incrementing the display glyph
    when it is not one of the special symbols.

    Fix: dropped the unused locals ``atts``, ``content`` and ``infoid``
    and the redundant join-then-return indirection; behavior unchanged.
    """
    link = ''
    if 'def' in info:
        link = info['def']['link']
    # Per-note link setting overrides the global one.
    backlink_type = link or g_links
    # numeric code point of the display character; incremented per ref
    # unless `i` is one of the special symbols.
    i_ = int(encode_high(i))
    allow_inc = i not in syms_re_s
    if backlink_type == "!":
        return ''
    elif backlink_type == '^':
        return """<sup><a href="#noteref{0}">{1}</a></sup>""".format(
            info['refids'][0], i)
    else:
        result = []
        for refid in info['refids']:
            i_entity = decode_high(i_)
            sup = """<sup><a href="#noteref{0}">{1}</a></sup>""".format(
                refid, i_entity)
            if allow_inc:
                i_ = i_ + 1
            result.append(sup)
        return ' '.join(result)
|
(self, info, g_links, i)
|
61,562 |
textile.core
|
markStartOfLinks
|
Finds and marks the start of well formed links in the input text.
|
def markStartOfLinks(self, text):
    """Finds and marks the start of well formed links in the input text.

    Works backwards from each '":url' boundary, balancing quote
    characters until the opening '"' of the link text is found, then
    inserts a '{uid}linkStartMarker:' token there for replaceLinks to
    pick up.
    """
    # Slice text on '":<not space>' boundaries. These always occur in
    # inline links between the link text and the url part and are much more
    # infrequent than '"' characters so we have less possible links to
    # process.
    slice_re = re.compile(r'":(?={0})'.format(regex_snippets['char']))
    slices = slice_re.split(text)
    output = []

    if len(slices) > 1:
        # There are never any start of links in the last slice, so pop it
        # off (we'll glue it back later).
        last_slice = slices.pop()

        for s in slices:
            # If there is no possible start quote then this slice is not
            # a link
            if '"' not in s:
                output.append(s)
                continue
            # Cut this slice into possible starting points wherever we find
            # a '"' character. Any of these parts could represent the start
            # of the link text - we have to find which one.
            possible_start_quotes = s.split('"')

            # Start our search for the start of the link with the closest
            # prior quote mark.
            possibility = possible_start_quotes.pop()

            # Init the balanced count. If this is still zero at the end of
            # our do loop we'll mark the " that caused it to balance as the
            # start of the link and move on to the next slice.
            balanced = 0
            linkparts = []
            i = 0

            while balanced != 0 or i == 0:  # pragma: no branch
                # Starting at the end, pop off the previous part of the
                # slice's fragments.

                # Add this part to those parts that make up the link text.
                linkparts.append(possibility)

                if len(possibility) > 0:
                    # did this part inc or dec the balanced count?
                    if re.search(r'^\S|=$', possibility, flags=re.U):  # pragma: no branch
                        balanced = balanced - 1
                    if re.search(r'\S$', possibility, flags=re.U):  # pragma: no branch
                        balanced = balanced + 1
                    try:
                        possibility = possible_start_quotes.pop()
                    except IndexError:
                        break
                else:
                    # If quotes occur next to each other, we get zero
                    # length strings.  eg. ...""Open the door,
                    # HAL!"":url... In this case we count a zero length in
                    # the last position as a closing quote and others as
                    # opening quotes.
                    if i == 0:
                        balanced = balanced + 1
                    else:
                        balanced = balanced - 1
                    i = i + 1

                    try:
                        possibility = possible_start_quotes.pop()
                    except IndexError:  # pragma: no cover
                        # If out of possible starting segments we back the
                        # last one from the linkparts array
                        linkparts.pop()
                        break
                    # If the next possibility is empty or ends in a space
                    # we have a closing ".
                    if (possibility == '' or possibility.endswith(' ')):
                        # force search exit
                        balanced = 0;

                if balanced <= 0:
                    # Balanced: the quote we just consumed opens the link.
                    possible_start_quotes.append(possibility)
                    break

            # Rebuild the link's text by reversing the parts and sticking
            # them back together with quotes.
            linkparts.reverse()
            link_content = '"'.join(linkparts)
            # Rebuild the remaining stuff that goes before the link but
            # that's already in order.
            pre_link = '"'.join(possible_start_quotes)
            # Re-assemble the link starts with a specific marker for the
            # next regex.
            o = '{0}{1}linkStartMarker:"{2}'.format(pre_link, self.uid,
                                                    link_content)
            output.append(o)

        # Add the last part back
        output.append(last_slice)
        # Re-assemble the full text with the start and end markers
        text = '":'.join(output)
    return text
|
(self, text)
|
61,563 |
textile.core
|
noTextile
| null |
def noTextile(self, text):
    """Shield ``<notextile>...</notextile>`` blocks and ``==...==``
    spans from any further Textile processing via fTextile."""
    shielded = self.doSpecial(text, '<notextile>', '</notextile>',
                              self.fTextile)
    return self.doSpecial(shielded, '==', '==', self.fTextile)
|
(self, text)
|
61,564 |
textile.core
|
noteRef
|
Search the text looking for note references.
|
def noteRef(self, text):
    """Search the text for note references (``[#label]``) and replace
    each via fParseNoteRefs."""
    note_re = re.compile(r"""
        \[          # start
        ({0})       # !atts
        \#
        ([^\]!]+)   # !label
        ([!]?)      # !nolink
        \]""".format(cls_re_s), re.X)
    return note_re.sub(self.fParseNoteRefs, text)
|
(self, text)
|
61,565 |
textile.core
|
parse
|
Parse the input text as textile and return html output.
|
def parse(self, text, rel=None, sanitize=False):
    """Parse the input text as textile and return html output.

    rel, when given, is stored as the rel attribute applied to generated
    links; sanitize runs the final HTML through the sanitizer.
    """
    # Reset per-document note bookkeeping so repeated parse() calls on
    # the same instance don't leak state from a previous run.
    self.notes = OrderedDict()
    self.unreferencedNotes = OrderedDict()
    self.notelist_cache = OrderedDict()
    if text.strip() == '':
        return text

    if self.restricted:
        # Untrusted input: escape raw HTML up front (quotes kept).
        text = encode_html(text, quotes=False)

    text = normalize_newlines(text)
    # Strip any occurrence of the private uid marker so user input
    # cannot forge internal shelf/url tokens.
    text = text.replace(self.uid, '')

    if self.block_tags:
        if self.lite:
            self.blocktag_whitelist = ['bq', 'p']
            text = self.block(text)
        else:
            self.blocktag_whitelist = [ 'bq', 'p', 'bc', 'notextile',
                'pre', 'h[1-6]',
                'fn{0}+'.format(regex_snippets['digit']), '###']
            text = self.block(text)
            text = self.placeNoteLists(text)
    else:
        # Inline markup (em, strong, sup, sub, del etc).
        text = self.span(text)

        # Glyph level substitutions (mainly typographic -- " & ' => curly
        # quotes, -- => em-dash etc.
        text = self.glyphs(text)

    if rel:
        self.rel = ' rel="{0}"'.format(rel)

    text = self.getRefs(text)

    if not self.lite:
        text = self.placeNoteLists(text)

    text = self.retrieve(text)
    text = text.replace('{0}:glyph:'.format(self.uid), '')

    if sanitize:
        text = sanitizer.sanitize(text)

    text = self.retrieveURLs(text)

    # if the text contains a break tag (<br> or <br />) not followed by
    # a newline, replace it with a new style break tag and a newline.
    text = re.sub(r'<br( /)?>(?!\n)', '<br />\n', text)

    text = text.rstrip('\n')
    return text
|
(self, text, rel=None, sanitize=False)
|
61,566 |
textile.core
|
placeNoteLists
|
Parse the text for endnotes.
|
def placeNoteLists(self, text):
    """Parse the text for endnotes.

    Referenced notes (those carrying a 'seq') are re-keyed by their
    sequence number and sorted; notes never referenced are moved to
    self.unreferencedNotes.  Finally, ``notelist`` placeholder
    paragraphs are expanded via fNoteLists.
    """
    if self.notes:
        ordered = OrderedDict()
        for label, info in self.notes.items():
            if 'seq' in info:
                seq = info['seq']
                info['seq'] = label
                ordered[seq] = info
            else:
                self.unreferencedNotes[label] = info

        if ordered:  # pragma: no branch
            # Re-key self.notes in ascending sequence order.
            self.notes = OrderedDict(sorted(ordered.items(),
                                            key=lambda kv: kv[0]))

    notelist_re = re.compile(r'<p>notelist({0})(?:\:([\w|{1}]))?([\^!]?)(\+?)'
                             r'\.?[\s]*</p>'.format(cls_re_s, syms_re_s),
                             re.U)
    return notelist_re.sub(self.fNoteLists, text)
|
(self, text)
|
61,567 |
textile.core
|
redcloth_list
|
Parse the text for definition lists and send them to be
formatted.
|
def redcloth_list(self, text):
    """Find RedCloth-style definition lists (``- term := def``) and send
    each block to fRCList for formatting."""
    dl_re = re.compile(r"^([-]+{0}[ .].*:=.*)$(?![^-])".format(cls_re_s),
                       re.M | re.U | re.S)
    return dl_re.sub(self.fRCList, text)
|
(self, text)
|
61,568 |
textile.core
|
refs
| null |
def refs(self, match):
    """Record one URL reference definition (alias -> url) in
    self.urlrefs and remove it from the text."""
    alias, url = match.groups()
    self.urlrefs[alias] = url
    return ''
|
(self, match)
|
61,569 |
textile.core
|
relURL
| null |
def relURL(self, url):
    """Collapse URLs whose scheme is not in self.url_schemes to '#'.

    Scheme-less (relative) URLs pass through unchanged.
    """
    scheme = urlparse(url).scheme
    return '#' if scheme and scheme not in self.url_schemes else url
|
(self, url)
|
61,570 |
textile.core
|
replaceLinks
|
Replaces links with tokens and stores them on the shelf.
|
def replaceLinks(self, text):
    """Replaces links with tokens and stores them on the shelf.

    Matches the linkStartMarker tokens planted by markStartOfLinks and
    hands each match to fLink.
    """
    stopchars = r"\s|^'\"*"
    link_re = re.compile(r"""
        (?P<pre>\[)?           # Optionally open with a square bracket eg. Look ["here":url]
        {0}linkStartMarker:"   # marks start of the link
        (?P<inner>(?:.|\n)*?)  # grab the content of the inner "..." part of the link, can be anything but
                               # do not worry about matching class, id, lang or title yet
        ":                     # literal ": marks end of atts + text + title block
        (?P<urlx>[^{1}]*)      # url upto a stopchar
    """.format(self.uid, stopchars), flags=re.X | re.U)
    return link_re.sub(self.fLink, text)
|
(self, text)
|
61,571 |
textile.core
|
retrieve
| null |
def retrieve(self, text):
    """Expand shelved tokens back into their stored text.

    Shelved values may themselves contain tokens, so keep replacing
    until a full pass leaves the text unchanged.
    """
    while True:
        before = text
        for token, stored in self.shelf.items():
            text = text.replace(token, stored)
        if text == before:
            return text
|
(self, text)
|
61,572 |
textile.core
|
retrieveURL
| null |
def retrieveURL(self, match):
    """Resolve one shelved URL token back into its URL.

    Unknown tokens become the empty string; if the cached value is a
    reference alias, it is resolved through self.urlrefs.
    """
    url = self.refCache.get(int(match.group('token')), '')
    if url == '':
        return url
    return self.urlrefs.get(url, url)
|
(self, match)
|
61,573 |
textile.core
|
retrieveURLs
| null |
def retrieveURLs(self, text):
    """Swap every shelved ``{uid}N:url`` token for its stored URL."""
    token_re = r'{0}(?P<token>[0-9]+):url'.format(self.uid)
    return re.sub(token_re, self.retrieveURL, text)
|
(self, text)
|
61,574 |
textile.core
|
shelve
| null |
def shelve(self, text):
    """Stash `text` on the shelf and return the opaque token that will
    later be expanded back by retrieve()."""
    self.refIndex = self.refIndex + 1
    token = '{0}{1}:shelve'.format(self.uid, self.refIndex)
    self.shelf[token] = text
    return token
|
(self, text)
|
61,575 |
textile.core
|
shelveURL
| null |
def shelveURL(self, text):
    """Cache a URL in refCache and return its ``{uid}N:url`` token.

    Empty URLs are returned as-is without consuming an index.
    """
    if text == '':
        return ''
    self.refIndex = self.refIndex + 1
    self.refCache[self.refIndex] = text
    return '{0}{1}{2}'.format(self.uid, self.refIndex, ':url')
|
(self, text)
|
61,576 |
textile.core
|
span
| null |
def span(self, text):
    """Apply inline span markup (**, *, ??, -, __, _, %, +, ~, ^).

    One regex pass per qtag.  span_depth caps how deep processing can
    nest — presumably fSpan re-enters span() on match contents, which
    would otherwise recurse unboundedly on pathological input
    (NOTE(review): confirm against fSpan).
    """
    qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__',
             r'_', r'%', r'\+', r'~', r'\^')
    # punctuation allowed to trail the span content before the closing tag
    pnct = r""".,"'?!;:‹›«»„“”‚‘’"""
    self.span_depth = self.span_depth + 1

    if self.span_depth <= self.max_span_depth:
        for tag in qtags:
            pattern = re.compile(r"""
                (?P<pre>^|(?<=[\s>{pnct}\(])|[{{[])
                (?P<tag>{tag})(?!{tag})
                (?P<atts>{cls})
                (?!{tag})
                (?::(?P<cite>\S+[^{tag}]{space}))?
                (?P<content>[^{space}{tag}]+|\S.*?[^\s{tag}\n])
                (?P<end>[{pnct}]*)
                {tag}
                (?P<tail>$|[\[\]}}<]|(?=[{pnct}]{{1,2}}[^0-9]|\s|\)))
            """.format(**{'tag': tag, 'cls': cls_re_s, 'pnct': pnct,
                          'space': regex_snippets['space']}),
                flags=re.X | re.U)
            text = pattern.sub(self.fSpan, text)
    self.span_depth = self.span_depth - 1
    return text
|
(self, text)
|
61,577 |
textile.core
|
table
| null |
def table(self, text):
    """Find the table block in the text (if any) and render it via the
    Table helper; otherwise return the (newline-padded) text."""
    # Trailing blank lines guarantee the end-of-table pattern can match.
    text = "{0}\n\n".format(text)
    table_re = re.compile(r'^(?:table(?P<tatts>_?{s}{a}{c})\.'
                          r'(?P<summary>.*?)\n)?^(?P<rows>{a}{c}\.? ?\|.*\|)'
                          r'[\s]*\n\n'.format(**{'s': table_span_re_s,
                                                 'a': align_re_s,
                                                 'c': cls_re_s}),
                          flags=re.S | re.M | re.U)
    found = table_re.search(text)
    if found:
        return Table(self, **found.groupdict()).process()
    return text
|
(self, text)
|
61,578 |
textile.core
|
textileLists
| null |
def textileLists(self, text):
    """Locate Textile list blocks (*, #, ;, : prefixed lines) and pass
    each whole block to fTextileList for rendering."""
    list_re = re.compile(r'^((?:[*;:]+|[*;:#]*#(?:_|\d+)?){0}[ .].*)$'
                         r'(?![^#*;:])'.format(cls_re_s),
                         re.U | re.M | re.S)
    return list_re.sub(self.fTextileList, text)
|
(self, text)
|
61,583 |
textile.core
|
textile
|
Apply Textile to a block of text.
This function takes the following additional parameters:
html_type - 'xhtml' or 'html5' style tags (default: 'xhtml')
|
def textile(text, html_type='xhtml'):
    """
    Apply Textile to a block of text.

    This function takes the following additional parameters:

    html_type - 'xhtml' or 'html5' style tags (default: 'xhtml')
    """
    parser = Textile(html_type=html_type)
    return parser.parse(text)
|
(text, html_type='xhtml')
|
61,584 |
textile.core
|
textile_restricted
|
Apply Textile to a block of text, with restrictions designed for weblog
comments and other untrusted input. Raw HTML is escaped, style attributes
are disabled, and rel='nofollow' is added to external links.
This function takes the following additional parameters:
html_type - 'xhtml' or 'html5' style tags (default: 'xhtml')
lite - restrict block tags to p, bq, and bc, disable tables (default: True)
noimage - disable image tags (default: True)
|
def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
    """
    Apply Textile to a block of text, with restrictions designed for weblog
    comments and other untrusted input.  Raw HTML is escaped, style
    attributes are disabled, and rel='nofollow' is added to external links.

    This function takes the following additional parameters:

    html_type - 'xhtml' or 'html5' style tags (default: 'xhtml')
    lite - restrict block tags to p, bq, and bc, disable tables (default: True)
    noimage - disable image tags (default: True)
    """
    parser = Textile(restricted=True, lite=lite, noimage=noimage,
                     html_type=html_type, rel='nofollow')
    return parser.parse(text)
|
(text, lite=True, noimage=True, html_type='xhtml')
|
61,589 |
hyperopt.base
|
Ctrl
|
Control object for interruptible, checkpoint-able evaluation
|
class Ctrl:
    """Control object for interruptible, checkpoint-able evaluation"""

    # Class-level logging shortcuts bound to the module logger.
    info = logger.info
    warn = logger.warning
    error = logger.error
    debug = logger.debug

    def __init__(self, trials, current_trial=None):
        """Wrap a trials collection (a fresh Trials() when `trials` is
        None) and remember which trial is currently being evaluated."""
        # -- attachments should be used like
        #      attachments[key]
        #      attachments[key] = value
        #    where key and value are strings. Client code should not
        #    expect any dictionary-like behaviour beyond that (no update)
        if trials is None:
            self.trials = Trials()
        else:
            self.trials = trials
        self.current_trial = current_trial

    def checkpoint(self, r=None):
        """Store intermediate result `r` (if given) on the current trial."""
        assert self.current_trial in self.trials._trials
        if r is not None:
            self.current_trial["result"] = r

    @property
    def attachments(self):
        """
        Support syntax for load:  self.attachments[name]
        Support syntax for store: self.attachments[name] = value
        """
        return self.trials.trial_attachments(trial=self.current_trial)

    def inject_results(self, specs, results, miscs, new_tids=None):
        """Inject new results into self.trials

        Returns ??? XXX

        new_tids can be None, in which case new tids will be generated
        automatically
        """
        trial = self.current_trial
        assert trial is not None
        num_news = len(specs)
        assert len(specs) == len(results) == len(miscs)
        if new_tids is None:
            new_tids = self.trials.new_trial_ids(num_news)
        new_trials = self.trials.source_trial_docs(
            tids=new_tids, specs=specs, results=results, miscs=miscs, sources=[trial]
        )
        for t in new_trials:
            # Injected trials arrive with their results, so mark them done.
            t["state"] = JOB_STATE_DONE
        return self.trials.insert_trial_docs(new_trials)
|
(trials, current_trial=None)
|
61,590 |
hyperopt.base
|
__init__
| null |
def __init__(self, trials, current_trial=None):
    """Wrap a trials collection (a fresh Trials() when `trials` is None)
    and remember which trial is currently being evaluated."""
    # -- attachments should be used like
    #      attachments[key]
    #      attachments[key] = value
    #    where key and value are strings. Client code should not
    #    expect any dictionary-like behaviour beyond that (no update)
    self.trials = Trials() if trials is None else trials
    self.current_trial = current_trial
|
(self, trials, current_trial=None)
|
61,591 |
hyperopt.base
|
checkpoint
| null |
def checkpoint(self, r=None):
    """Store intermediate result `r` (when given) on the current trial;
    the current trial must belong to self.trials."""
    assert self.current_trial in self.trials._trials
    if r is None:
        return
    self.current_trial["result"] = r
|
(self, r=None)
|
61,592 |
hyperopt.base
|
inject_results
|
Inject new results into self.trials
Returns ??? XXX
new_tids can be None, in which case new tids will be generated
automatically
|
def inject_results(self, specs, results, miscs, new_tids=None):
    """Inject new results into self.trials

    Returns ??? XXX

    new_tids can be None, in which case new tids will be generated
    automatically
    """
    source = self.current_trial
    assert source is not None
    num_new = len(specs)
    assert len(specs) == len(results) == len(miscs)
    if new_tids is None:
        new_tids = self.trials.new_trial_ids(num_new)
    docs = self.trials.source_trial_docs(
        tids=new_tids, specs=specs, results=results, miscs=miscs,
        sources=[source]
    )
    for doc in docs:
        # Injected trials arrive with their results, so mark them done.
        doc["state"] = JOB_STATE_DONE
    return self.trials.insert_trial_docs(docs)
|
(self, specs, results, miscs, new_tids=None)
|
61,593 |
hyperopt.base
|
Domain
|
Picklable representation of search space and evaluation function.
|
class Domain:
    """Picklable representation of search space and evaluation function."""

    # When True, pyll.rec_eval prints the offending node on failure.
    rec_eval_print_node_on_error = False

    # -- the Ctrl object is not used directly, but rather
    #    a live Ctrl instance is inserted for the pyll_ctrl
    #    in self.evaluate so that it can be accessed from within
    #    the pyll graph describing the search space.
    pyll_ctrl = pyll.as_apply(Ctrl)

    def __init__(
        self,
        fn,
        expr,
        workdir=None,
        pass_expr_memo_ctrl=None,
        name=None,
        loss_target=None,
    ):
        """
        Parameters
        ----------

        fn : callable
            This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`)

        expr : hyperopt.pyll.Apply
            This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`)

        workdir : string (or None)
            If non-None, the current working directory will be `workdir` while
            `expr` and `fn` are evaluated. (XXX Currently only respected by
            jobs run via MongoWorker)

        pass_expr_memo_ctrl : bool
            If True, `fn` will be called like this:
            `fn(self.expr, memo, ctrl)`,
            where `memo` is a dictionary mapping `Apply` nodes to their
            computed values, and `ctrl` is a `Ctrl` instance for communicating
            with a Trials database.  This lower-level calling convention is
            useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself
            in some customized way.

        name : string (or None)
            Label, used for pretty-printing.

        loss_target : float (or None)
            The actual or estimated minimum of `fn`.
            Some optimization algorithms may behave differently if their first
            objective is to find an input that achieves a certain value,
            rather than the more open-ended objective of pure minimization.
            XXX: Move this from Domain to be an fmin arg.
        """
        self.fn = fn
        if pass_expr_memo_ctrl is None:
            # fmin-decorated objectives may opt into the low-level calling
            # convention via this attribute.
            self.pass_expr_memo_ctrl = getattr(fn, "fmin_pass_expr_memo_ctrl", False)
        else:
            self.pass_expr_memo_ctrl = pass_expr_memo_ctrl

        self.expr = pyll.as_apply(expr)

        # Collect the search-space parameters (hyperopt_param nodes),
        # rejecting duplicate labels.
        self.params = {}
        for node in pyll.dfs(self.expr):
            if node.name == "hyperopt_param":
                label = node.arg["label"].obj
                if label in self.params:
                    raise DuplicateLabel(label)
                self.params[label] = node.arg["obj"]

        self.loss_target = loss_target
        self.name = name

        self.workdir = workdir
        self.s_new_ids = pyll.Literal("new_ids")  # -- list at eval-time
        before = pyll.dfs(self.expr)
        # -- raises exception if expr contains cycles
        pyll.toposort(self.expr)
        vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
        # -- raises exception if v_expr contains cycles
        pyll.toposort(vh.v_expr)

        idxs_by_label = vh.idxs_by_label()
        vals_by_label = vh.vals_by_label()
        after = pyll.dfs(self.expr)
        # -- try to detect if VectorizeHelper screwed up anything inplace
        assert before == after
        assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
        assert set(idxs_by_label.keys()) == set(self.params.keys())

        self.s_rng = pyll.Literal("rng-placeholder")
        # -- N.B. operates inplace:
        self.s_idxs_vals = recursive_set_rng_kwarg(
            pyll.scope.pos_args(idxs_by_label, vals_by_label), self.s_rng
        )

        # -- raises an exception if no topological ordering exists
        pyll.toposort(self.s_idxs_vals)

        # -- Protocol for serialization.
        #    self.cmd indicates to e.g. MongoWorker how this domain
        #    should be [un]serialized.
        #    XXX This mechanism deserves review as support for ipython
        #        workers improves.
        self.cmd = ("domain_attachment", "FMinIter_Domain")

    def memo_from_config(self, config):
        """Build the pyll evaluation memo: map each hyperopt_param node
        to its value in `config` (missing labels get GarbageCollected)."""
        memo = {}
        for node in pyll.dfs(self.expr):
            if node.name == "hyperopt_param":
                label = node.arg["label"].obj
                # -- hack because it's not really garbagecollected
                #    this does have the desired effect of crashing the
                #    function if rec_eval actually needs a value that
                #    the the optimization algorithm thought to be unnecessary
                memo[node] = config.get(label, pyll.base.GarbageCollected)
        return memo

    def evaluate(self, config, ctrl, attach_attachments=True):
        """Evaluate the objective at `config`, returning a result dict
        with at least 'status' (and 'loss' when status is ok)."""
        memo = self.memo_from_config(config)
        # Substitute the live ctrl for the Ctrl literal in the graph.
        use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
        if self.pass_expr_memo_ctrl:
            rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
        else:
            # -- the "work" of evaluating `config` can be written
            #    either into the pyll part (self.expr)
            #    or the normal Python part (self.fn)
            pyll_rval = pyll.rec_eval(
                self.expr,
                memo=memo,
                print_node_on_error=self.rec_eval_print_node_on_error,
            )
            rval = self.fn(pyll_rval)

        if isinstance(rval, (float, int, np.number)):
            # Bare numbers are wrapped into a minimal OK result document.
            dict_rval = {"loss": float(rval), "status": STATUS_OK}
        else:
            dict_rval = dict(rval)
            status = dict_rval["status"]
            if status not in STATUS_STRINGS:
                raise InvalidResultStatus(dict_rval)

            if status == STATUS_OK:
                # -- make sure that the loss is present and valid
                try:
                    dict_rval["loss"] = float(dict_rval["loss"])
                except (TypeError, KeyError):
                    raise InvalidLoss(dict_rval)

        if attach_attachments:
            attachments = dict_rval.pop("attachments", {})
            for key, val in list(attachments.items()):
                ctrl.attachments[key] = val

        # -- don't do this here because SON-compatibility is only a requirement
        #    for trials destined for a mongodb. In-memory rvals can contain
        #    anything.
        return dict_rval

    def evaluate_async(self, config, ctrl, attach_attachments=True):
        """
        this is the first part of async evaluation for ipython parallel engines (see ipy.py)

        This breaks evaluate into two parts to allow for the apply_async call
        to only pass the objective function and arguments.
        """
        memo = self.memo_from_config(config)
        use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
        if self.pass_expr_memo_ctrl:
            pyll_rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
        else:
            # -- the "work" of evaluating `config` can be written
            #    either into the pyll part (self.expr)
            #    or the normal Python part (self.fn)
            pyll_rval = pyll.rec_eval(
                self.expr,
                memo=memo,
                print_node_on_error=self.rec_eval_print_node_on_error,
            )
        return (self.fn, pyll_rval)

    def evaluate_async2(self, rval, ctrl, attach_attachments=True):
        """
        this is the second part of async evaluation for ipython parallel engines (see ipy.py)
        """
        if isinstance(rval, (float, int, np.number)):
            dict_rval = {"loss": float(rval), "status": STATUS_OK}
        else:
            dict_rval = dict(rval)
            status = dict_rval["status"]
            if status not in STATUS_STRINGS:
                raise InvalidResultStatus(dict_rval)

            if status == STATUS_OK:
                # -- make sure that the loss is present and valid
                try:
                    dict_rval["loss"] = float(dict_rval["loss"])
                except (TypeError, KeyError):
                    raise InvalidLoss(dict_rval)

        if attach_attachments:
            attachments = dict_rval.pop("attachments", {})
            for key, val in list(attachments.items()):
                ctrl.attachments[key] = val

        # -- don't do this here because SON-compatibility is only a requirement
        #    for trials destined for a mongodb. In-memory rvals can contain
        #    anything.
        return dict_rval

    def short_str(self):
        """Short human-readable label for this domain."""
        return "Domain{%s}" % str(self.fn)

    def loss(self, result, config=None):
        """Extract the scalar-valued loss from a result document"""
        return result.get("loss", None)

    def loss_variance(self, result, config=None):
        """Return the variance in the estimate of the loss"""
        return result.get("loss_variance", 0.0)

    def true_loss(self, result, config=None):
        """Return a true loss, in the case that the `loss` is a surrogate"""
        # N.B. don't use get() here, it evaluates self.loss un-necessarily
        try:
            return result["true_loss"]
        except KeyError:
            return self.loss(result, config=config)

    def true_loss_variance(self, config=None):
        """Return the variance in true loss,
        in the case that the `loss` is a surrogate.
        """
        raise NotImplementedError()

    def status(self, result, config=None):
        """Extract the job status from a result document"""
        return result["status"]

    def new_result(self):
        """Return a JSON-encodable object
        to serve as the 'result' for new jobs.
        """
        return {"status": STATUS_NEW}
|
(fn, expr, workdir=None, pass_expr_memo_ctrl=None, name=None, loss_target=None)
|
61,594 |
hyperopt.base
|
__init__
|
Parameters
----------
fn : callable
This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`)
expr : hyperopt.pyll.Apply
This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`)
workdir : string (or None)
        If non-None, the current working directory will be `workdir` while
`expr` and `fn` are evaluated. (XXX Currently only respected by
jobs run via MongoWorker)
pass_expr_memo_ctrl : bool
If True, `fn` will be called like this:
`fn(self.expr, memo, ctrl)`,
where `memo` is a dictionary mapping `Apply` nodes to their
computed values, and `ctrl` is a `Ctrl` instance for communicating
with a Trials database. This lower-level calling convention is
useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself
in some customized way.
name : string (or None)
Label, used for pretty-printing.
loss_target : float (or None)
The actual or estimated minimum of `fn`.
Some optimization algorithms may behave differently if their first
objective is to find an input that achieves a certain value,
rather than the more open-ended objective of pure minimization.
XXX: Move this from Domain to be an fmin arg.
|
def __init__(
    self,
    fn,
    expr,
    workdir=None,
    pass_expr_memo_ctrl=None,
    name=None,
    loss_target=None,
):
    """
    Parameters
    ----------
    fn : callable
        This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`)
    expr : hyperopt.pyll.Apply
        This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`)
    workdir : string (or None)
        If non-None, the current working directory will be `workdir` while
        `expr` and `fn` are evaluated. (XXX Currently only respected by
        jobs run via MongoWorker)
    pass_expr_memo_ctrl : bool
        If True, `fn` will be called like this:
        `fn(self.expr, memo, ctrl)`,
        where `memo` is a dictionary mapping `Apply` nodes to their
        computed values, and `ctrl` is a `Ctrl` instance for communicating
        with a Trials database. This lower-level calling convention is
        useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself
        in some customized way.
    name : string (or None)
        Label, used for pretty-printing.
    loss_target : float (or None)
        The actual or estimated minimum of `fn`.
        Some optimization algorithms may behave differently if their first
        objective is to find an input that achieves a certain value,
        rather than the more open-ended objective of pure minimization.
        XXX: Move this from Domain to be an fmin arg.
    """
    self.fn = fn
    # Objective functions may opt in to the low-level calling convention
    # by setting an `fmin_pass_expr_memo_ctrl` attribute on themselves.
    if pass_expr_memo_ctrl is None:
        self.pass_expr_memo_ctrl = getattr(fn, "fmin_pass_expr_memo_ctrl", False)
    else:
        self.pass_expr_memo_ctrl = pass_expr_memo_ctrl
    self.expr = pyll.as_apply(expr)
    # Collect hyperparameter nodes by label; duplicate labels are an error.
    self.params = {}
    for node in pyll.dfs(self.expr):
        if node.name == "hyperopt_param":
            label = node.arg["label"].obj
            if label in self.params:
                raise DuplicateLabel(label)
            self.params[label] = node.arg["obj"]
    self.loss_target = loss_target
    self.name = name
    self.workdir = workdir
    self.s_new_ids = pyll.Literal("new_ids")  # -- list at eval-time
    before = pyll.dfs(self.expr)
    # -- raises exception if expr contains cycles
    pyll.toposort(self.expr)
    vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
    # -- raises exception if v_expr contains cycles
    pyll.toposort(vh.v_expr)
    idxs_by_label = vh.idxs_by_label()
    vals_by_label = vh.vals_by_label()
    after = pyll.dfs(self.expr)
    # -- try to detect if VectorizeHelper screwed up anything inplace
    assert before == after
    assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
    assert set(idxs_by_label.keys()) == set(self.params.keys())
    self.s_rng = pyll.Literal("rng-placeholder")
    # -- N.B. operates inplace:
    self.s_idxs_vals = recursive_set_rng_kwarg(
        pyll.scope.pos_args(idxs_by_label, vals_by_label), self.s_rng
    )
    # -- raises an exception if no topological ordering exists
    pyll.toposort(self.s_idxs_vals)
    # -- Protocol for serialization.
    # self.cmd indicates to e.g. MongoWorker how this domain
    # should be [un]serialized.
    # XXX This mechanism deserves review as support for ipython
    # workers improves.
    self.cmd = ("domain_attachment", "FMinIter_Domain")
|
(self, fn, expr, workdir=None, pass_expr_memo_ctrl=None, name=None, loss_target=None)
|
61,595 |
hyperopt.base
|
evaluate
| null |
def evaluate(self, config, ctrl, attach_attachments=True):
    """Run the objective on `config` and return a validated result dict.

    `config` maps hyperparameter labels to values; `ctrl` is substituted
    for Ctrl literals in the expression graph so the objective can talk
    to the trials database. Attachments in the result are moved into
    `ctrl.attachments` unless `attach_attachments` is False.
    """
    memo = self.memo_from_config(config)
    use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
    if self.pass_expr_memo_ctrl:
        # Low-level convention: the objective does its own rec_eval.
        rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
    else:
        # -- the "work" of evaluating `config` can be written
        # either into the pyll part (self.expr)
        # or the normal Python part (self.fn)
        pyll_rval = pyll.rec_eval(
            self.expr,
            memo=memo,
            print_node_on_error=self.rec_eval_print_node_on_error,
        )
        rval = self.fn(pyll_rval)
    # A bare number is shorthand for a successful trial with that loss.
    if isinstance(rval, (float, int, np.number)):
        dict_rval = {"loss": float(rval), "status": STATUS_OK}
    else:
        dict_rval = dict(rval)
        status = dict_rval["status"]
        if status not in STATUS_STRINGS:
            raise InvalidResultStatus(dict_rval)
        if status == STATUS_OK:
            # -- make sure that the loss is present and valid
            try:
                dict_rval["loss"] = float(dict_rval["loss"])
            except (TypeError, KeyError):
                raise InvalidLoss(dict_rval)
    if attach_attachments:
        attachments = dict_rval.pop("attachments", {})
        for key, val in list(attachments.items()):
            ctrl.attachments[key] = val
    # -- don't do this here because SON-compatibility is only a requirement
    # for trials destined for a mongodb. In-memory rvals can contain
    # anything.
    return dict_rval
|
(self, config, ctrl, attach_attachments=True)
|
61,596 |
hyperopt.base
|
evaluate_async
|
this is the first part of async evaluation for ipython parallel engines (see ipy.py)
This breaks evaluate into two parts to allow for the apply_async call
to only pass the objective function and arguments.
|
def evaluate_async(self, config, ctrl, attach_attachments=True):
    """
    this is the first part of async evaluation for ipython parallel engines (see ipy.py)

    This breaks evaluate into two parts to allow for the apply_async call
    to only pass the objective function and arguments.
    """
    memo = self.memo_from_config(config)
    use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
    if self.pass_expr_memo_ctrl:
        # Low-level convention: the objective does its own rec_eval.
        pyll_rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
    else:
        # -- the "work" of evaluating `config` can be written
        # either into the pyll part (self.expr)
        # or the normal Python part (self.fn)
        pyll_rval = pyll.rec_eval(
            self.expr,
            memo=memo,
            print_node_on_error=self.rec_eval_print_node_on_error,
        )
    # NOTE(review): `attach_attachments` is accepted for signature symmetry
    # with evaluate() but is unused here; result validation and attachment
    # handling happen in the second stage (evaluate_async2).
    return (self.fn, pyll_rval)
|
(self, config, ctrl, attach_attachments=True)
|
61,597 |
hyperopt.base
|
evaluate_async2
|
this is the second part of async evaluation for ipython parallel engines (see ipy.py)
|
def evaluate_async2(self, rval, ctrl, attach_attachments=True):
    """
    this is the second part of async evaluation for ipython parallel engines (see ipy.py)
    """
    # Plain numbers are treated as a successful trial with that loss.
    if isinstance(rval, (float, int, np.number)):
        dict_rval = {"loss": float(rval), "status": STATUS_OK}
    else:
        dict_rval = dict(rval)
        status = dict_rval["status"]
        if status not in STATUS_STRINGS:
            raise InvalidResultStatus(dict_rval)
        if status == STATUS_OK:
            # Successful trials must report a float-convertible loss.
            try:
                dict_rval["loss"] = float(dict_rval["loss"])
            except (TypeError, KeyError):
                raise InvalidLoss(dict_rval)
    if attach_attachments:
        # Strip attachments from the result and hand them to ctrl.
        for key, val in list(dict_rval.pop("attachments", {}).items()):
            ctrl.attachments[key] = val
    # SON-compatibility is deliberately not enforced here: it only matters
    # for trials destined for a mongodb, and in-memory results may hold
    # arbitrary objects.
    return dict_rval
|
(self, rval, ctrl, attach_attachments=True)
|
61,598 |
hyperopt.base
|
loss
|
Extract the scalar-valued loss from a result document
|
def loss(self, result, config=None):
    """Extract the scalar-valued loss from a result document."""
    try:
        return result["loss"]
    except KeyError:
        return None
|
(self, result, config=None)
|
61,599 |
hyperopt.base
|
loss_variance
|
Return the variance in the estimate of the loss
|
def loss_variance(self, result, config=None):
    """Return the variance in the estimate of the loss."""
    # Absent field means no variance information: report 0.0.
    return result["loss_variance"] if "loss_variance" in result else 0.0
|
(self, result, config=None)
|
61,600 |
hyperopt.base
|
memo_from_config
| null |
def memo_from_config(self, config):
    """Build the rec_eval memo mapping hyperopt_param nodes to `config` values."""
    # Labels missing from `config` map to GarbageCollected, which makes
    # rec_eval crash if it actually needs a value the optimization
    # algorithm thought was unnecessary.
    return {
        node: config.get(node.arg["label"].obj, pyll.base.GarbageCollected)
        for node in pyll.dfs(self.expr)
        if node.name == "hyperopt_param"
    }
|
(self, config)
|
61,601 |
hyperopt.base
|
new_result
|
Return a JSON-encodable object
to serve as the 'result' for new jobs.
|
def new_result(self):
    """Return a JSON-encodable 'result' placeholder for freshly queued jobs."""
    result = {"status": STATUS_NEW}
    return result
|
(self)
|
61,602 |
hyperopt.base
|
short_str
| null |
def short_str(self):
    """One-line summary naming the wrapped objective function."""
    return "Domain{%s}" % str(self.fn)
|
(self)
|
61,603 |
hyperopt.base
|
status
|
Extract the job status from a result document
|
def status(self, result, config=None):
    """Extract the job status from a result document."""
    # Every result document is expected to carry 'status'; KeyError
    # propagates otherwise.
    return result["status"]
|
(self, result, config=None)
|
61,604 |
hyperopt.base
|
true_loss
|
Return a true loss, in the case that the `loss` is a surrogate
|
def true_loss(self, result, config=None):
    """Return a true loss, in the case that the `loss` is a surrogate."""
    # Fall back to the (possibly surrogate) loss only when 'true_loss'
    # is absent; .get() is avoided because its default would evaluate
    # self.loss un-necessarily.
    if "true_loss" not in result:
        return self.loss(result, config=config)
    return result["true_loss"]
|
(self, result, config=None)
|
61,605 |
hyperopt.base
|
true_loss_variance
|
Return the variance in true loss,
in the case that the `loss` is a surrogate.
|
def true_loss_variance(self, config=None):
    """Return the variance in true loss,
    in the case that the `loss` is a surrogate.
    """
    # Base Domain provides no estimate; subclasses may override.
    raise NotImplementedError()
|
(self, config=None)
|
61,606 |
hyperopt.fmin
|
FMinIter
|
Object for conducting search experiments.
|
class FMinIter:
    """Object for conducting search experiments."""

    # When True, objective-function exceptions are recorded on the trial
    # (JOB_STATE_ERROR) instead of propagating out of serial_evaluate.
    catch_eval_exceptions = False
    # Pickle protocol used when saving trials to `trials_save_file`.
    pickle_protocol = -1

    def __init__(
        self,
        algo,
        domain,
        trials,
        rstate,
        asynchronous=None,
        max_queue_len=1,
        poll_interval_secs=1.0,
        max_evals=sys.maxsize,
        timeout=None,
        loss_threshold=None,
        verbose=False,
        show_progressbar=True,
        early_stop_fn=None,
        trials_save_file="",
    ):
        """Prepare an iterative fmin run of `algo` over `domain` using `trials`."""
        self.algo = algo
        self.domain = domain
        self.trials = trials
        # show_progressbar may be False, True, or a custom callback.
        if not show_progressbar or not verbose:
            self.progress_callback = progress.no_progress_callback
        elif show_progressbar is True:
            self.progress_callback = progress.default_callback
        else:
            self.progress_callback = show_progressbar
        # Default the execution mode to whatever the trials object supports.
        if asynchronous is None:
            self.asynchronous = trials.asynchronous
        else:
            self.asynchronous = asynchronous
        self.poll_interval_secs = poll_interval_secs
        self.max_queue_len = max_queue_len
        self.max_evals = max_evals
        self.early_stop_fn = early_stop_fn
        # Extra arguments threaded through successive early_stop_fn calls.
        self.early_stop_args = []
        self.trials_save_file = trials_save_file
        self.timeout = timeout
        self.loss_threshold = loss_threshold
        self.start_time = timer()
        self.rstate = rstate
        self.verbose = verbose
        if self.asynchronous:
            if "FMinIter_Domain" in trials.attachments:
                logger.warning("over-writing old domain trials attachment")
            msg = pickler.dumps(domain)
            # -- sanity check for unpickling
            pickler.loads(msg)
            # Remote workers retrieve and unpickle the domain from here.
            trials.attachments["FMinIter_Domain"] = msg

    def serial_evaluate(self, N=-1):
        """Run up to N queued (JOB_STATE_NEW) trials in-process; N=-1 runs all."""
        for trial in self.trials._dynamic_trials:
            if trial["state"] == base.JOB_STATE_NEW:
                trial["state"] = base.JOB_STATE_RUNNING
                now = coarse_utcnow()
                trial["book_time"] = now
                trial["refresh_time"] = now
                spec = base.spec_from_misc(trial["misc"])
                ctrl = base.Ctrl(self.trials, current_trial=trial)
                try:
                    result = self.domain.evaluate(spec, ctrl)
                except Exception as e:
                    logger.error("job exception: %s" % str(e))
                    trial["state"] = base.JOB_STATE_ERROR
                    trial["misc"]["error"] = (str(type(e)), str(e))
                    trial["refresh_time"] = coarse_utcnow()
                    if not self.catch_eval_exceptions:
                        # -- JOB_STATE_ERROR means this trial
                        # will be removed from self.trials.trials
                        # by this refresh call.
                        self.trials.refresh()
                        raise
                else:
                    trial["state"] = base.JOB_STATE_DONE
                    trial["result"] = result
                    trial["refresh_time"] = coarse_utcnow()
                N -= 1
                if N == 0:
                    break
        self.trials.refresh()

    @property
    def is_cancelled(self):
        """
        Indicates whether this fmin run has been cancelled. SparkTrials supports cancellation.
        """
        # Only trials objects that define _fmin_cancelled can be cancelled.
        if hasattr(self.trials, "_fmin_cancelled"):
            if self.trials._fmin_cancelled:
                return True
        return False

    def block_until_done(self):
        """Wait until no trial is NEW or RUNNING (or run them serially)."""
        already_printed = False
        if self.asynchronous:
            unfinished_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]

            def get_queue_len():
                return self.trials.count_by_state_unsynced(unfinished_states)

            qlen = get_queue_len()
            while qlen > 0:
                if not already_printed and self.verbose:
                    logger.info("Waiting for %d jobs to finish ..." % qlen)
                    already_printed = True
                time.sleep(self.poll_interval_secs)
                qlen = get_queue_len()
            self.trials.refresh()
        else:
            self.serial_evaluate()

    def run(self, N, block_until_done=True):
        """
        Run `self.algo` iteratively (use existing `self.trials` to produce the new
        ones), update, and repeat
        block_until_done means that the process blocks until ALL jobs in
        trials are not in running or new state
        """
        trials = self.trials
        algo = self.algo
        n_queued = 0

        def get_queue_len():
            return self.trials.count_by_state_unsynced(base.JOB_STATE_NEW)

        def get_n_done():
            return self.trials.count_by_state_unsynced(base.JOB_STATE_DONE)

        def get_n_unfinished():
            unfinished_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]
            return self.trials.count_by_state_unsynced(unfinished_states)

        stopped = False
        initial_n_done = get_n_done()
        with self.progress_callback(
            initial=initial_n_done, total=self.max_evals
        ) as progress_ctx:
            all_trials_complete = False
            best_loss = float("inf")
            while (
                # more run to Q || ( block_flag & trials not done )
                (n_queued < N or (block_until_done and not all_trials_complete))
                # no timeout || < current last time
                and (self.timeout is None or (timer() - self.start_time) < self.timeout)
                # no loss_threshold || < current best_loss
                and (self.loss_threshold is None or best_loss >= self.loss_threshold)
            ):
                qlen = get_queue_len()
                while (
                    qlen < self.max_queue_len and n_queued < N and not self.is_cancelled
                ):
                    n_to_enqueue = min(self.max_queue_len - qlen, N - n_queued)
                    # get ids for next trials to enqueue
                    new_ids = trials.new_trial_ids(n_to_enqueue)
                    self.trials.refresh()
                    # Based on existing trials and the domain, use `algo` to probe in
                    # new hp points. Save the results of those inspections into
                    # `new_trials`. This is the core of `run`, all the rest is just
                    # processes orchestration
                    new_trials = algo(
                        new_ids, self.domain, trials, self.rstate.integers(2 ** 31 - 1)
                    )
                    assert len(new_ids) >= len(new_trials)
                    if len(new_trials):
                        self.trials.insert_trial_docs(new_trials)
                        self.trials.refresh()
                        n_queued += len(new_trials)
                        qlen = get_queue_len()
                    else:
                        # Algorithm produced no new trials: nothing left to try.
                        stopped = True
                        break
                if self.is_cancelled:
                    break
                if self.asynchronous:
                    # -- wait for workers to fill in the trials
                    time.sleep(self.poll_interval_secs)
                else:
                    # -- loop over trials and do the jobs directly
                    self.serial_evaluate()
                self.trials.refresh()
                if self.trials_save_file != "":
                    pickler.dump(self.trials, open(self.trials_save_file, "wb"))
                if self.early_stop_fn is not None:
                    # kwargs returned here are threaded into the next call.
                    stop, kwargs = self.early_stop_fn(
                        self.trials, *self.early_stop_args
                    )
                    self.early_stop_args = kwargs
                    if stop:
                        logger.info(
                            "Early stop triggered. Stopping iterations as condition is reach."
                        )
                        stopped = True
                # update progress bar with the min loss among trials with status ok
                losses = [
                    loss
                    for loss in self.trials.losses()
                    if loss is not None and not np.isnan(loss)
                ]
                if losses:
                    best_loss = min(losses)
                    progress_ctx.postfix = "best loss: " + str(best_loss)
                n_unfinished = get_n_unfinished()
                if n_unfinished == 0:
                    all_trials_complete = True
                n_done = get_n_done()
                n_done_this_iteration = n_done - initial_n_done
                if n_done_this_iteration > 0:
                    progress_ctx.update(n_done_this_iteration)
                initial_n_done = n_done
                if stopped:
                    break
        if block_until_done:
            self.block_until_done()
            self.trials.refresh()
            logger.info("Queue empty, exiting run.")
        else:
            qlen = get_queue_len()
            if qlen:
                msg = "Exiting run, not waiting for %d jobs." % qlen
                logger.info(msg)

    def __iter__(self):
        # The iterator protocol: each __next__ call advances the search.
        return self

    def __next__(self):
        """Advance the search by one call to run() and return the trials."""
        self.run(1, block_until_done=self.asynchronous)
        if self.early_stop_fn is not None:
            stop, kwargs = self.early_stop_fn(self.trials, *self.early_stop_args)
            self.early_stop_args = kwargs
            if stop:
                raise StopIteration()
        if len(self.trials) >= self.max_evals:
            raise StopIteration()
        return self.trials

    def exhaust(self):
        """Run until max_evals trials exist, then refresh and return self."""
        n_done = len(self.trials)
        self.run(self.max_evals - n_done, block_until_done=self.asynchronous)
        self.trials.refresh()
        return self
|
(algo, domain, trials, rstate, asynchronous=None, max_queue_len=1, poll_interval_secs=1.0, max_evals=9223372036854775807, timeout=None, loss_threshold=None, verbose=False, show_progressbar=True, early_stop_fn=None, trials_save_file='')
|
61,607 |
hyperopt.fmin
|
__init__
| null |
def __init__(
    self,
    algo,
    domain,
    trials,
    rstate,
    asynchronous=None,
    max_queue_len=1,
    poll_interval_secs=1.0,
    max_evals=sys.maxsize,
    timeout=None,
    loss_threshold=None,
    verbose=False,
    show_progressbar=True,
    early_stop_fn=None,
    trials_save_file="",
):
    """Prepare an iterative fmin run of `algo` over `domain` using `trials`."""
    self.algo = algo
    self.domain = domain
    self.trials = trials
    # show_progressbar may be False, True, or a custom progress callback.
    if not show_progressbar or not verbose:
        self.progress_callback = progress.no_progress_callback
    elif show_progressbar is True:
        self.progress_callback = progress.default_callback
    else:
        self.progress_callback = show_progressbar
    # Default the execution mode to whatever the trials object supports.
    if asynchronous is None:
        self.asynchronous = trials.asynchronous
    else:
        self.asynchronous = asynchronous
    self.poll_interval_secs = poll_interval_secs
    self.max_queue_len = max_queue_len
    self.max_evals = max_evals
    self.early_stop_fn = early_stop_fn
    # Extra arguments threaded through successive early_stop_fn calls.
    self.early_stop_args = []
    self.trials_save_file = trials_save_file
    self.timeout = timeout
    self.loss_threshold = loss_threshold
    self.start_time = timer()
    self.rstate = rstate
    self.verbose = verbose
    if self.asynchronous:
        if "FMinIter_Domain" in trials.attachments:
            logger.warning("over-writing old domain trials attachment")
        msg = pickler.dumps(domain)
        # -- sanity check for unpickling
        pickler.loads(msg)
        # Remote workers retrieve and unpickle the domain from here.
        trials.attachments["FMinIter_Domain"] = msg
|
(self, algo, domain, trials, rstate, asynchronous=None, max_queue_len=1, poll_interval_secs=1.0, max_evals=9223372036854775807, timeout=None, loss_threshold=None, verbose=False, show_progressbar=True, early_stop_fn=None, trials_save_file='')
|
61,609 |
hyperopt.fmin
|
__next__
| null |
def __next__(self):
    """Advance the search by one run() step; raise StopIteration when done."""
    self.run(1, block_until_done=self.asynchronous)
    early_stop = self.early_stop_fn
    if early_stop is not None:
        stop, kwargs = early_stop(self.trials, *self.early_stop_args)
        # kwargs are carried into the next early-stop evaluation.
        self.early_stop_args = kwargs
        if stop:
            raise StopIteration()
    if len(self.trials) >= self.max_evals:
        raise StopIteration()
    return self.trials
|
(self)
|
61,610 |
hyperopt.fmin
|
block_until_done
| null |
def block_until_done(self):
    """Wait until no trial remains in the NEW or RUNNING state."""
    if not self.asynchronous:
        # Synchronous mode: simply run the outstanding jobs in-process.
        self.serial_evaluate()
        return
    pending_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]

    def pending_count():
        return self.trials.count_by_state_unsynced(pending_states)

    announced = False
    qlen = pending_count()
    while qlen > 0:
        if not announced and self.verbose:
            logger.info("Waiting for %d jobs to finish ..." % qlen)
            announced = True
        time.sleep(self.poll_interval_secs)
        qlen = pending_count()
    self.trials.refresh()
|
(self)
|
61,611 |
hyperopt.fmin
|
exhaust
| null |
def exhaust(self):
    """Run until `max_evals` trials exist, refresh, and return self (fluent)."""
    remaining = self.max_evals - len(self.trials)
    self.run(remaining, block_until_done=self.asynchronous)
    self.trials.refresh()
    return self
|
(self)
|
61,612 |
hyperopt.fmin
|
run
|
Run `self.algo` iteratively (use existing `self.trials` to produce the new
ones), update, and repeat
block_until_done means that the process blocks until ALL jobs in
trials are not in running or new state
|
def run(self, N, block_until_done=True):
    """
    Run `self.algo` iteratively (use existing `self.trials` to produce the new
    ones), update, and repeat
    block_until_done means that the process blocks until ALL jobs in
    trials are not in running or new state
    """
    trials = self.trials
    algo = self.algo
    n_queued = 0

    def get_queue_len():
        # Number of trials waiting to be executed.
        return self.trials.count_by_state_unsynced(base.JOB_STATE_NEW)

    def get_n_done():
        return self.trials.count_by_state_unsynced(base.JOB_STATE_DONE)

    def get_n_unfinished():
        unfinished_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]
        return self.trials.count_by_state_unsynced(unfinished_states)

    stopped = False
    initial_n_done = get_n_done()
    with self.progress_callback(
        initial=initial_n_done, total=self.max_evals
    ) as progress_ctx:
        all_trials_complete = False
        best_loss = float("inf")
        while (
            # more run to Q || ( block_flag & trials not done )
            (n_queued < N or (block_until_done and not all_trials_complete))
            # no timeout || < current last time
            and (self.timeout is None or (timer() - self.start_time) < self.timeout)
            # no loss_threshold || < current best_loss
            and (self.loss_threshold is None or best_loss >= self.loss_threshold)
        ):
            qlen = get_queue_len()
            while (
                qlen < self.max_queue_len and n_queued < N and not self.is_cancelled
            ):
                n_to_enqueue = min(self.max_queue_len - qlen, N - n_queued)
                # get ids for next trials to enqueue
                new_ids = trials.new_trial_ids(n_to_enqueue)
                self.trials.refresh()
                # Based on existing trials and the domain, use `algo` to probe in
                # new hp points. Save the results of those inspections into
                # `new_trials`. This is the core of `run`, all the rest is just
                # processes orchestration
                new_trials = algo(
                    new_ids, self.domain, trials, self.rstate.integers(2 ** 31 - 1)
                )
                assert len(new_ids) >= len(new_trials)
                if len(new_trials):
                    self.trials.insert_trial_docs(new_trials)
                    self.trials.refresh()
                    n_queued += len(new_trials)
                    qlen = get_queue_len()
                else:
                    # Algorithm produced no new trials: nothing left to try.
                    stopped = True
                    break
            if self.is_cancelled:
                break
            if self.asynchronous:
                # -- wait for workers to fill in the trials
                time.sleep(self.poll_interval_secs)
            else:
                # -- loop over trials and do the jobs directly
                self.serial_evaluate()
            self.trials.refresh()
            if self.trials_save_file != "":
                pickler.dump(self.trials, open(self.trials_save_file, "wb"))
            if self.early_stop_fn is not None:
                # kwargs returned here are threaded into the next call.
                stop, kwargs = self.early_stop_fn(
                    self.trials, *self.early_stop_args
                )
                self.early_stop_args = kwargs
                if stop:
                    logger.info(
                        "Early stop triggered. Stopping iterations as condition is reach."
                    )
                    stopped = True
            # update progress bar with the min loss among trials with status ok
            losses = [
                loss
                for loss in self.trials.losses()
                if loss is not None and not np.isnan(loss)
            ]
            if losses:
                best_loss = min(losses)
                progress_ctx.postfix = "best loss: " + str(best_loss)
            n_unfinished = get_n_unfinished()
            if n_unfinished == 0:
                all_trials_complete = True
            n_done = get_n_done()
            n_done_this_iteration = n_done - initial_n_done
            if n_done_this_iteration > 0:
                progress_ctx.update(n_done_this_iteration)
            initial_n_done = n_done
            if stopped:
                break
    if block_until_done:
        self.block_until_done()
        self.trials.refresh()
        logger.info("Queue empty, exiting run.")
    else:
        qlen = get_queue_len()
        if qlen:
            msg = "Exiting run, not waiting for %d jobs." % qlen
            logger.info(msg)
|
(self, N, block_until_done=True)
|
61,613 |
hyperopt.fmin
|
serial_evaluate
| null |
def serial_evaluate(self, N=-1):
    """Run up to N queued (JOB_STATE_NEW) trials in-process; N=-1 runs all."""
    for trial in self.trials._dynamic_trials:
        if trial["state"] == base.JOB_STATE_NEW:
            trial["state"] = base.JOB_STATE_RUNNING
            now = coarse_utcnow()
            trial["book_time"] = now
            trial["refresh_time"] = now
            spec = base.spec_from_misc(trial["misc"])
            ctrl = base.Ctrl(self.trials, current_trial=trial)
            try:
                result = self.domain.evaluate(spec, ctrl)
            except Exception as e:
                # Record the failure on the trial document.
                logger.error("job exception: %s" % str(e))
                trial["state"] = base.JOB_STATE_ERROR
                trial["misc"]["error"] = (str(type(e)), str(e))
                trial["refresh_time"] = coarse_utcnow()
                if not self.catch_eval_exceptions:
                    # -- JOB_STATE_ERROR means this trial
                    # will be removed from self.trials.trials
                    # by this refresh call.
                    self.trials.refresh()
                    raise
            else:
                trial["state"] = base.JOB_STATE_DONE
                trial["result"] = result
                trial["refresh_time"] = coarse_utcnow()
            N -= 1
            if N == 0:
                break
    self.trials.refresh()
|
(self, N=-1)
|
61,614 |
hyperopt.spark
|
SparkTrials
|
Implementation of hyperopt.Trials supporting
distributed execution using Apache Spark clusters.
This requires fmin to be run on a Spark cluster.
Plugging SparkTrials into hyperopt.fmin() allows hyperopt
to send model training and evaluation tasks to Spark workers,
parallelizing hyperparameter search.
Each trial (set of hyperparameter values) is handled within
a single Spark task; i.e., each model will be fit and evaluated
on a single worker machine. Trials are run asynchronously.
See hyperopt.Trials docs for general information about Trials.
The fields we store in our trial docs match the base Trials class. The fields include:
- 'tid': trial ID
- 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
- 'result': evaluation result for completed trial run
- 'refresh_time': timestamp for last status update
- 'misc': includes:
- 'error': (error type, error message)
- 'book_time': timestamp for trial run start
|
class SparkTrials(Trials):
    """
    Implementation of hyperopt.Trials supporting
    distributed execution using Apache Spark clusters.
    This requires fmin to be run on a Spark cluster.
    Plugging SparkTrials into hyperopt.fmin() allows hyperopt
    to send model training and evaluation tasks to Spark workers,
    parallelizing hyperparameter search.
    Each trial (set of hyperparameter values) is handled within
    a single Spark task; i.e., each model will be fit and evaluated
    on a single worker machine. Trials are run asynchronously.
    See hyperopt.Trials docs for general information about Trials.
    The fields we store in our trial docs match the base Trials class. The fields include:
    - 'tid': trial ID
    - 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
    - 'result': evaluation result for completed trial run
    - 'refresh_time': timestamp for last status update
    - 'misc': includes:
    - 'error': (error type, error message)
    - 'book_time': timestamp for trial run start
    """

    # Trials run as Spark jobs, so FMinIter must poll rather than
    # evaluate serially.
    asynchronous = True

    # Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128.
    MAX_CONCURRENT_JOBS_ALLOWED = 128

    def __init__(
        self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
    ):
        """
        :param parallelism: Maximum number of parallel trials to run,
        i.e., maximum number of concurrent Spark tasks.
        The actual parallelism is subject to available Spark task slots at
        runtime.
        If set to None (default) or a non-positive value, this will be set to
        Spark's default parallelism or `1`.
        We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
        :param timeout: Maximum time (in seconds) which fmin is allowed to take.
        If this timeout is hit, then fmin will cancel running and proposed trials.
        It will retain all completed trial runs and return the best result found
        so far.
        :param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
        to use an existing SparkSession or create a new one. SparkSession is
        the entry point for various facilities provided by Spark. For more
        information, visit the documentation for PySpark.
        """
        super().__init__(exp_key=None, refresh=False)
        if not _have_spark:
            raise Exception(
                "SparkTrials cannot import pyspark classes. Make sure that PySpark "
                "is available in your environment. E.g., try running 'import pyspark'"
            )
        validate_timeout(timeout)
        validate_loss_threshold(loss_threshold)
        self._spark = (
            SparkSession.builder.getOrCreate()
            if spark_session is None
            else spark_session
        )
        self._spark_context = self._spark.sparkContext
        # NOTE(review): a py4j ClientServer gateway is taken to indicate
        # pinned-thread mode -- confirm against the py4j/PySpark version in use.
        self._spark_pinned_threads_enabled = isinstance(
            self._spark_context._gateway, ClientServer
        )
        # The feature to support controlling jobGroupIds is in SPARK-22340
        self._spark_supports_job_cancelling = (
            self._spark_pinned_threads_enabled
            or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup")
        )
        spark_default_parallelism = self._spark_context.defaultParallelism
        self.parallelism = self._decide_parallelism(
            requested_parallelism=parallelism,
            spark_default_parallelism=spark_default_parallelism,
        )
        if not self._spark_supports_job_cancelling and timeout is not None:
            logger.warning(
                "SparkTrials was constructed with a timeout specified, but this Apache "
                "Spark version does not support job group-based cancellation. The "
                "timeout will be respected when starting new Spark jobs, but "
                "SparkTrials will not be able to cancel running Spark jobs which exceed"
                " the timeout."
            )
        self.timeout = timeout
        self.loss_threshold = loss_threshold
        # Set by the dispatcher when fmin is cancelled (e.g. on timeout).
        self._fmin_cancelled = False
        self._fmin_cancelled_reason = None
        self.refresh()

    @staticmethod
    def _decide_parallelism(requested_parallelism, spark_default_parallelism):
        """
        Given the requested parallelism, return the max parallelism SparkTrials will actually use.
        See the docstring for `parallelism` in the constructor for expected behavior.
        """
        if requested_parallelism is None or requested_parallelism <= 0:
            parallelism = max(spark_default_parallelism, 1)
            logger.warning(
                "Because the requested parallelism was None or a non-positive value, "
                "parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
                "or 1, whichever is greater. "
                "We recommend setting parallelism explicitly to a positive value because "
                "the total of Spark task slots is subject to cluster sizing.".format(
                    d=parallelism, s=spark_default_parallelism
                )
            )
        else:
            parallelism = requested_parallelism
        if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
            logger.warning(
                "Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
                    p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
                )
            )
            parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
        return parallelism

    def count_successful_trials(self):
        """
        Returns the current number of trials which ran successfully
        """
        return self.count_by_state_unsynced(base.JOB_STATE_DONE)

    def count_failed_trials(self):
        """
        Returns the current number of trial runs which failed
        """
        return self.count_by_state_unsynced(base.JOB_STATE_ERROR)

    def count_cancelled_trials(self):
        """
        Returns the current number of cancelled trial runs.
        This covers trials which are cancelled from exceeding the timeout.
        """
        return self.count_by_state_unsynced(base.JOB_STATE_CANCEL)

    def count_total_trials(self):
        """
        Returns the current number of all successful, failed, and cancelled trial runs
        """
        total_states = [
            base.JOB_STATE_DONE,
            base.JOB_STATE_ERROR,
            base.JOB_STATE_CANCEL,
        ]
        return self.count_by_state_unsynced(total_states)

    def delete_all(self):
        """
        Reset the Trials to init state
        """
        super().delete_all()
        # Also clear the cancellation flags set by a previous fmin run.
        self._fmin_cancelled = False
        self._fmin_cancelled_reason = None

    def trial_attachments(self, trial):
        # Attachments require a shared store, which SparkTrials does not have.
        raise NotImplementedError("SparkTrials does not support trial attachments.")

    def fmin(
        self,
        fn,
        space,
        algo,
        max_evals,
        timeout,
        loss_threshold,
        max_queue_len,
        rstate,
        verbose,
        pass_expr_memo_ctrl,
        catch_eval_exceptions,
        return_argmin,
        show_progressbar,
        early_stop_fn,
        trials_save_file="",
    ):
        """
        This should not be called directly but is called via :func:`hyperopt.fmin`
        Refer to :func:`hyperopt.fmin` for docs on each argument
        """
        # A timeout set on the trials object wins over the fmin argument.
        if timeout is not None:
            if self.timeout is not None:
                logger.warning(
                    "Timeout param was defined in Trials object, ignoring fmin definition"
                )
            else:
                validate_timeout(timeout)
                self.timeout = timeout
        if loss_threshold is not None:
            validate_loss_threshold(loss_threshold)
            self.loss_threshold = loss_threshold
        assert (
            not pass_expr_memo_ctrl
        ), "SparkTrials does not support `pass_expr_memo_ctrl`"
        assert (
            not catch_eval_exceptions
        ), "SparkTrials does not support `catch_eval_exceptions`"
        state = _SparkFMinState(self._spark, fn, space, self)
        # Will launch a dispatcher thread which runs each trial task as one spark job.
        state.launch_dispatcher()
        try:
            res = fmin(
                fn,
                space,
                algo,
                max_evals,
                timeout=timeout,
                loss_threshold=loss_threshold,
                max_queue_len=max_queue_len,
                trials=self,
                allow_trials_fmin=False,  # -- prevent recursion
                rstate=rstate,
                pass_expr_memo_ctrl=None,  # not supported
                catch_eval_exceptions=catch_eval_exceptions,
                verbose=verbose,
                return_argmin=return_argmin,
                points_to_evaluate=None,  # not supported
                show_progressbar=show_progressbar,
                early_stop_fn=early_stop_fn,
                trials_save_file="",  # not supported
            )
        except BaseException as e:
            logger.debug("fmin thread exits with an exception raised.")
            raise e
        else:
            logger.debug("fmin thread exits normally.")
            return res
        finally:
            # Always wait for the dispatcher/worker threads before reporting.
            state.wait_for_all_threads()
            logger.info(
                "Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
                    t=self.count_total_trials(),
                    s=self.count_successful_trials(),
                    f=self.count_failed_trials(),
                    c=self.count_cancelled_trials(),
                )
            )
|
(parallelism=None, timeout=None, loss_threshold=None, spark_session=None)
|
61,615 |
hyperopt.base
|
__getitem__
| null |
def __getitem__(self, item):
    """Deliberately unsupported; always raises NotImplementedError."""
    # -- how to make it obvious whether indexing is by _trials position
    # or by tid if both are integers?
    raise NotImplementedError("")
|
(self, item)
|
61,616 |
hyperopt.spark
|
__init__
|
:param parallelism: Maximum number of parallel trials to run,
i.e., maximum number of concurrent Spark tasks.
The actual parallelism is subject to available Spark task slots at
runtime.
If set to None (default) or a non-positive value, this will be set to
Spark's default parallelism or `1`.
We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
:param timeout: Maximum time (in seconds) which fmin is allowed to take.
If this timeout is hit, then fmin will cancel running and proposed trials.
It will retain all completed trial runs and return the best result found
so far.
:param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
to use an existing SparkSession or create a new one. SparkSession is
the entry point for various facilities provided by Spark. For more
information, visit the documentation for PySpark.
|
def __init__(
    self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
):
    """
    :param parallelism: Maximum number of parallel trials to run,
        i.e., maximum number of concurrent Spark tasks.
        The actual parallelism is subject to available Spark task slots at
        runtime.
        If set to None (default) or a non-positive value, this will be set to
        Spark's default parallelism or `1`.
        We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
    :param timeout: Maximum time (in seconds) which fmin is allowed to take.
        If this timeout is hit, then fmin will cancel running and proposed trials.
        It will retain all completed trial runs and return the best result found
        so far.
    :param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
        to use an existing SparkSession or create a new one. SparkSession is
        the entry point for various facilities provided by Spark. For more
        information, visit the documentation for PySpark.
    """
    # refresh=False: attributes used by refresh() are set up further below.
    super().__init__(exp_key=None, refresh=False)
    if not _have_spark:
        raise Exception(
            "SparkTrials cannot import pyspark classes. Make sure that PySpark "
            "is available in your environment. E.g., try running 'import pyspark'"
        )
    # Fail fast on bad arguments before touching Spark.
    validate_timeout(timeout)
    validate_loss_threshold(loss_threshold)
    # Reuse the ambient session unless the caller supplied one explicitly.
    self._spark = (
        SparkSession.builder.getOrCreate()
        if spark_session is None
        else spark_session
    )
    self._spark_context = self._spark.sparkContext
    # A py4j ClientServer gateway indicates pinned-thread mode
    # (presumably needed for per-thread job-group control -- see PySpark docs).
    self._spark_pinned_threads_enabled = isinstance(
        self._spark_context._gateway, ClientServer
    )
    # The feature to support controlling jobGroupIds is in SPARK-22340
    self._spark_supports_job_cancelling = (
        self._spark_pinned_threads_enabled
        or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup")
    )
    spark_default_parallelism = self._spark_context.defaultParallelism
    self.parallelism = self._decide_parallelism(
        requested_parallelism=parallelism,
        spark_default_parallelism=spark_default_parallelism,
    )
    if not self._spark_supports_job_cancelling and timeout is not None:
        logger.warning(
            "SparkTrials was constructed with a timeout specified, but this Apache "
            "Spark version does not support job group-based cancellation. The "
            "timeout will be respected when starting new Spark jobs, but "
            "SparkTrials will not be able to cancel running Spark jobs which exceed"
            " the timeout."
        )
    self.timeout = timeout
    self.loss_threshold = loss_threshold
    # Cancellation state consumed by fmin()/delete_all().
    self._fmin_cancelled = False
    self._fmin_cancelled_reason = None
    self.refresh()
|
(self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None)
|
61,617 |
hyperopt.base
|
__iter__
| null |
def __iter__(self):
    """Iterate over the refreshed trial documents (self._trials)."""
    try:
        return iter(self._trials)
    except AttributeError:
        # _trials only exists after refresh() has run at least once.
        print("You have to refresh before you iterate", file=sys.stderr)
        raise
|
(self)
|
61,618 |
hyperopt.base
|
__len__
| null |
def __len__(self):
    """Number of refreshed trial documents (len of self._trials)."""
    try:
        return len(self._trials)
    except AttributeError:
        # _trials only exists after refresh() has run at least once.
        print("You have to refresh before you compute len", file=sys.stderr)
        raise
|
(self)
|
61,619 |
hyperopt.spark
|
_decide_parallelism
|
Given the requested parallelism, return the max parallelism SparkTrials will actually use.
See the docstring for `parallelism` in the constructor for expected behavior.
|
@staticmethod
def _decide_parallelism(requested_parallelism, spark_default_parallelism):
    """
    Given the requested parallelism, return the max parallelism SparkTrials will actually use.
    See the docstring for `parallelism` in the constructor for expected behavior.
    """
    # None / non-positive request -> fall back to Spark's default (at least 1).
    if requested_parallelism is None or requested_parallelism <= 0:
        parallelism = max(spark_default_parallelism, 1)
        logger.warning(
            "Because the requested parallelism was None or a non-positive value, "
            "parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
            "or 1, whichever is greater. "
            "We recommend setting parallelism explicitly to a positive value because "
            "the total of Spark task slots is subject to cluster sizing.".format(
                d=parallelism, s=spark_default_parallelism
            )
        )
    else:
        parallelism = requested_parallelism
    # Either path is capped at the class-wide concurrent-jobs limit.
    if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
        logger.warning(
            "Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
                p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
            )
        )
        parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
    return parallelism
|
(requested_parallelism, spark_default_parallelism)
|
61,620 |
hyperopt.base
|
_insert_trial_docs
|
insert with no error checking
|
def _insert_trial_docs(self, docs):
    """insert with no error checking"""
    # Returns the tids of the inserted docs, in input order.
    rval = [doc["tid"] for doc in docs]
    self._dynamic_trials.extend(docs)
    return rval
|
(self, docs)
|
61,621 |
hyperopt.base
|
aname
| null |
def aname(self, trial, name):
    """Build the attachments-dict key for attachment `name` on `trial`."""
    return f"ATTACH::{trial['tid']}::{name}"
|
(self, trial, name)
|
61,622 |
hyperopt.base
|
assert_valid_trial
| null |
def assert_valid_trial(self, trial):
    """Validate a trial document's structure and return it unchanged.

    Raises InvalidTrial on any structural problem; re-raises BSON encoding
    errors after printing a banner.
    """
    if not (hasattr(trial, "keys") and hasattr(trial, "values")):
        raise InvalidTrial("trial should be dict-like", trial)
    for key in TRIAL_KEYS:
        if key not in trial:
            # NOTE(review): the "%s" is never interpolated -- `key` rides
            # along as a second exception argument instead.
            raise InvalidTrial("trial missing key %s", key)
    for key in TRIAL_MISC_KEYS:
        if key not in trial["misc"]:
            raise InvalidTrial('trial["misc"] missing key', key)
    if trial["tid"] != trial["misc"]["tid"]:
        raise InvalidTrial("tid mismatch between root and misc", trial)
    # -- check for SON-encodable
    if have_bson:
        try:
            bson.BSON.encode(trial)
        except:
            # TODO: save the trial object somewhere to inspect, fix, re-insert
            # so that precious data is not simply deallocated and lost.
            print("-" * 80)
            print("CAN'T ENCODE")
            print("-" * 80)
            raise
    if trial["exp_key"] != self._exp_key:
        raise InvalidTrial("wrong exp_key", (trial["exp_key"], self._exp_key))
    # XXX how to assert that tids are unique?
    return trial
|
(self, trial)
|
61,623 |
hyperopt.base
|
average_best_error
|
Return the average best error of the experiment
Average best error is defined as the average of bandit.true_loss,
weighted by the probability that the corresponding bandit.loss is best.
For domains with loss measurement variance of 0, this function simply
returns the true_loss corresponding to the result with the lowest loss.
|
def average_best_error(self, bandit=None):
    """Return the average best error of the experiment
    Average best error is defined as the average of bandit.true_loss,
    weighted by the probability that the corresponding bandit.loss is best.
    For domains with loss measurement variance of 0, this function simply
    returns the true_loss corresponding to the result with the lowest loss.
    """
    if bandit is None:
        # Read loss / variance / true_loss straight off STATUS_OK results.
        results = self.results
        loss = [r["loss"] for r in results if r["status"] == STATUS_OK]
        loss_v = [
            r.get("loss_variance", 0) for r in results if r["status"] == STATUS_OK
        ]
        true_loss = [
            r.get("true_loss", r["loss"])
            for r in results
            if r["status"] == STATUS_OK
        ]
    else:
        # Extract via the bandit's accessors, rejecting non-finite values.
        def fmap(f):
            rval = np.asarray(
                [
                    f(r, s)
                    for (r, s) in zip(self.results, self.specs)
                    if bandit.status(r) == STATUS_OK
                ]
            ).astype("float")
            if not np.all(np.isfinite(rval)):
                raise ValueError()
            return rval
        loss = fmap(bandit.loss)
        loss_v = fmap(bandit.loss_variance)
        true_loss = fmap(bandit.true_loss)
    # Sort (loss, loss_variance, true_loss) triples by ascending loss.
    loss3 = list(zip(loss, loss_v, true_loss))
    if not loss3:
        raise ValueError("Empty loss vector")
    loss3.sort()
    loss3 = np.asarray(loss3)
    if np.all(loss3[:, 1] == 0):
        # Zero variance everywhere: the minimum-loss row's true_loss wins.
        best_idx = np.argmin(loss3[:, 0])
        return loss3[best_idx, 2]
    else:
        # Keep candidates within 3 sigma (of the best row's variance) of
        # the best loss, then weight their true_loss by P(min).
        cutoff = 0
        sigma = np.sqrt(loss3[0][1])
        while cutoff < len(loss3) and loss3[cutoff][0] < loss3[0][0] + 3 * sigma:
            cutoff += 1
        pmin = pmin_sampled(loss3[:cutoff, 0], loss3[:cutoff, 1])
        avg_true_loss = (pmin * loss3[:cutoff, 2]).sum()
        return avg_true_loss
|
(self, bandit=None)
|
61,624 |
hyperopt.base
|
count_by_state_synced
|
Return trial counts by looking at self._trials
|
def count_by_state_synced(self, arg, trials=None):
    """
    Return trial counts by looking at self._trials

    :param arg: a single state from JOB_STATES, or an iterable of such
        states.
    :param trials: sequence of trial docs to count over; defaults to the
        refreshed self._trials.
    :raises TypeError: if `arg` is neither a known state nor iterable.
    """
    if trials is None:
        trials = self._trials
    if arg in JOB_STATES:
        # Count directly rather than materializing a throwaway list of docs.
        return sum(1 for doc in trials if doc["state"] == arg)
    if hasattr(arg, "__iter__"):
        states = set(arg)
        assert all(x in JOB_STATES for x in states)
        return sum(1 for doc in trials if doc["state"] in states)
    raise TypeError(arg)
|
(self, arg, trials=None)
|
61,625 |
hyperopt.base
|
count_by_state_unsynced
|
Return trial counts that count_by_state_synced would return if we
called refresh() first.
|
def count_by_state_unsynced(self, arg):
    """
    Return trial counts that count_by_state_synced would return if we
    called refresh() first.
    """
    exp_key = self._exp_key
    if exp_key is None:
        candidates = self._dynamic_trials
    else:
        candidates = [
            tt for tt in self._dynamic_trials if tt["exp_key"] == exp_key
        ]
    return self.count_by_state_synced(arg, trials=candidates)
|
(self, arg)
|
61,626 |
hyperopt.spark
|
count_cancelled_trials
|
Returns the current number of cancelled trial runs.
This covers trials which are cancelled from exceeding the timeout.
|
def count_cancelled_trials(self):
    """
    Returns the current number of cancelled trial runs.
    This covers trials which are cancelled from exceeding the timeout.
    """
    cancelled_state = base.JOB_STATE_CANCEL
    return self.count_by_state_unsynced(cancelled_state)
|
(self)
|
61,627 |
hyperopt.spark
|
count_failed_trials
|
Returns the current number of trial runs which failed
|
def count_failed_trials(self):
    """
    Returns the current number of trial runs which failed
    """
    error_state = base.JOB_STATE_ERROR
    return self.count_by_state_unsynced(error_state)
|
(self)
|
61,628 |
hyperopt.spark
|
count_successful_trials
|
Returns the current number of trials which ran successfully
|
def count_successful_trials(self):
    """
    Returns the current number of trials which ran successfully
    """
    done_state = base.JOB_STATE_DONE
    return self.count_by_state_unsynced(done_state)
|
(self)
|
61,629 |
hyperopt.spark
|
count_total_trials
|
Returns the current number of all successful, failed, and cancelled trial runs
|
def count_total_trials(self):
    """
    Returns the current number of all successful, failed, and cancelled trial runs
    """
    terminal_states = [
        base.JOB_STATE_DONE,
        base.JOB_STATE_ERROR,
        base.JOB_STATE_CANCEL,
    ]
    return self.count_by_state_unsynced(terminal_states)
|
(self)
|
61,630 |
hyperopt.spark
|
delete_all
|
Reset the Trials to init state
|
def delete_all(self):
    """
    Reset the Trials to init state
    """
    super().delete_all()
    # Also clear SparkTrials-specific cancellation bookkeeping.
    self._fmin_cancelled = False
    self._fmin_cancelled_reason = None
|
(self)
|
61,631 |
hyperopt.spark
|
fmin
|
This should not be called directly but is called via :func:`hyperopt.fmin`
Refer to :func:`hyperopt.fmin` for docs on each argument
|
def fmin(
    self,
    fn,
    space,
    algo,
    max_evals,
    timeout,
    loss_threshold,
    max_queue_len,
    rstate,
    verbose,
    pass_expr_memo_ctrl,
    catch_eval_exceptions,
    return_argmin,
    show_progressbar,
    early_stop_fn,
    trials_save_file="",
):
    """
    This should not be called directly but is called via :func:`hyperopt.fmin`
    Refer to :func:`hyperopt.fmin` for docs on each argument
    """
    # A timeout already set on this Trials object takes precedence.
    if timeout is not None:
        if self.timeout is not None:
            logger.warning(
                "Timeout param was defined in Trials object, ignoring fmin definition"
            )
        else:
            validate_timeout(timeout)
            self.timeout = timeout
    if loss_threshold is not None:
        validate_loss_threshold(loss_threshold)
        self.loss_threshold = loss_threshold
    assert (
        not pass_expr_memo_ctrl
    ), "SparkTrials does not support `pass_expr_memo_ctrl`"
    assert (
        not catch_eval_exceptions
    ), "SparkTrials does not support `catch_eval_exceptions`"
    state = _SparkFMinState(self._spark, fn, space, self)
    # Will launch a dispatcher thread which runs each trial task as one spark job.
    state.launch_dispatcher()
    try:
        # `fmin` here resolves to the module-level hyperopt fmin function,
        # not this method; allow_trials_fmin=False prevents it from
        # dispatching back into us.
        res = fmin(
            fn,
            space,
            algo,
            max_evals,
            timeout=timeout,
            loss_threshold=loss_threshold,
            max_queue_len=max_queue_len,
            trials=self,
            allow_trials_fmin=False,  # -- prevent recursion
            rstate=rstate,
            pass_expr_memo_ctrl=None,  # not supported
            catch_eval_exceptions=catch_eval_exceptions,
            verbose=verbose,
            return_argmin=return_argmin,
            points_to_evaluate=None,  # not supported
            show_progressbar=show_progressbar,
            early_stop_fn=early_stop_fn,
            trials_save_file="",  # not supported
        )
    except BaseException as e:
        logger.debug("fmin thread exits with an exception raised.")
        raise e
    else:
        logger.debug("fmin thread exits normally.")
        return res
    finally:
        # Always drain worker threads and report totals, even on failure.
        state.wait_for_all_threads()
        logger.info(
            "Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
                t=self.count_total_trials(),
                s=self.count_successful_trials(),
                f=self.count_failed_trials(),
                c=self.count_cancelled_trials(),
            )
        )
|
(self, fn, space, algo, max_evals, timeout, loss_threshold, max_queue_len, rstate, verbose, pass_expr_memo_ctrl, catch_eval_exceptions, return_argmin, show_progressbar, early_stop_fn, trials_save_file='')
|
61,632 |
hyperopt.base
|
insert_trial_doc
|
insert trial after error checking
Does not refresh. Call self.refresh() for the trial to appear in
self.specs, self.results, etc.
|
def insert_trial_doc(self, doc):
    """insert trial after error checking
    Does not refresh. Call self.refresh() for the trial to appear in
    self.specs, self.results, etc.
    """
    doc = self.assert_valid_trial(SONify(doc))
    # Returns the tid of the inserted trial.
    return self._insert_trial_docs([doc])[0]
    # refreshing could be done fast in this base implementation, but with
    # a real DB the steps should be separated.
|
(self, doc)
|
61,633 |
hyperopt.base
|
insert_trial_docs
|
trials - something like what is returned by self.new_trial_docs()
|
def insert_trial_docs(self, docs):
    """Validate and insert several trial docs (as from new_trial_docs)."""
    validated = []
    for doc in docs:
        validated.append(self.assert_valid_trial(SONify(doc)))
    return self._insert_trial_docs(validated)
|
(self, docs)
|
61,634 |
hyperopt.base
|
losses
| null |
def losses(self, bandit=None):
    """Loss of each result; via `bandit.loss` when a bandit is given."""
    if bandit is not None:
        return [bandit.loss(r, s) for r, s in zip(self.results, self.specs)]
    return [r.get("loss") for r in self.results]
|
(self, bandit=None)
|
61,635 |
hyperopt.base
|
new_trial_docs
| null |
def new_trial_docs(self, tids, specs, results, miscs):
    """Assemble fresh trial documents (state NEW) from parallel lists."""
    assert len(tids) == len(specs) == len(results) == len(miscs)
    return [
        {
            "state": JOB_STATE_NEW,
            "tid": tid,
            "spec": spec,
            "result": result,
            "misc": misc,
            "exp_key": self._exp_key,
            "owner": None,
            "version": 0,
            "book_time": None,
            "refresh_time": None,
        }
        for tid, spec, result, misc in zip(tids, specs, results, miscs)
    ]
|
(self, tids, specs, results, miscs)
|
61,636 |
hyperopt.base
|
new_trial_ids
| null |
def new_trial_ids(self, n):
    """Reserve and return `n` fresh consecutive trial ids."""
    next_id = len(self._ids)
    fresh = list(range(next_id, next_id + n))
    self._ids.update(fresh)
    return fresh
|
(self, n)
|
61,637 |
hyperopt.base
|
refresh
| null |
def refresh(self):
    """Rebuild the filtered trials view from the backing store.

    In MongoTrials this is where documents are fetched from the database.
    """
    exp_key = self._exp_key
    def _visible(tt):
        # Valid-state docs; additionally filtered by exp_key when one is set.
        if tt["state"] not in JOB_VALID_STATES:
            return False
        return exp_key is None or tt["exp_key"] == exp_key
    self._trials = [tt for tt in self._dynamic_trials if _visible(tt)]
    self._ids.update(tt["tid"] for tt in self._trials)
|
(self)
|
61,638 |
hyperopt.base
|
source_trial_docs
| null |
def source_trial_docs(self, tids, specs, results, miscs, sources):
    """Clone trial docs from `sources`, overriding identity fields per tid."""
    assert len(tids) == len(specs) == len(results) == len(miscs) == len(sources)
    docs = []
    for tid, spec, result, misc, source in zip(
        tids, specs, results, miscs, sources
    ):
        doc = {
            "version": 0,
            "tid": tid,
            "spec": spec,
            "result": result,
            "misc": misc,
            "state": source["state"],
            "exp_key": source["exp_key"],
            "owner": source["owner"],
            "book_time": source["book_time"],
            "refresh_time": source["refresh_time"],
        }
        # misc must carry these fields, some of which may already be set
        # correctly; setdefault asserts any pre-set value agrees.
        for k, v in (("tid", tid), ("cmd", None), ("from_tid", source["tid"])):
            assert doc["misc"].setdefault(k, v) == v
        docs.append(doc)
    return docs
|
(self, tids, specs, results, miscs, sources)
|
61,639 |
hyperopt.base
|
statuses
| null |
def statuses(self, bandit=None):
    """Status of each result; via `bandit.status` when a bandit is given."""
    if bandit is not None:
        return [bandit.status(r, s) for r, s in zip(self.results, self.specs)]
    return [r.get("status") for r in self.results]
|
(self, bandit=None)
|
61,640 |
hyperopt.spark
|
trial_attachments
| null |
def trial_attachments(self, trial):
    # SparkTrials deliberately opts out of the attachments API.
    raise NotImplementedError("SparkTrials does not support trial attachments.")
|
(self, trial)
|
61,641 |
hyperopt.base
|
view
| null |
def view(self, exp_key=None, refresh=True):
    """Return a shallow clone of this Trials filtered to `exp_key`.

    The clone shares _ids, _dynamic_trials and attachments with self;
    __init__ is bypassed via object.__new__.
    """
    clone = object.__new__(self.__class__)
    clone._exp_key = exp_key
    clone._ids = self._ids
    clone._dynamic_trials = self._dynamic_trials
    clone.attachments = self.attachments
    if refresh:
        clone.refresh()
    return clone
|
(self, exp_key=None, refresh=True)
|
61,642 |
hyperopt.base
|
Trials
|
Database interface supporting data-driven model-based optimization.
The model-based optimization algorithms used by hyperopt's fmin function
work by analyzing samples of a response surface--a history of what points
in the search space were tested, and what was discovered by those tests.
A Trials instance stores that history and makes it available to fmin and
to the various optimization algorithms.
This class (`base.Trials`) is a pure-Python implementation of the database
in terms of lists of dictionaries. Subclass `mongoexp.MongoTrials`
implements the same API in terms of a mongodb database running in another
process. Other subclasses may be implemented in future.
The elements of `self.trials` represent all of the completed, in-progress,
and scheduled evaluation points from an e.g. `fmin` call.
Each element of `self.trials` is a dictionary with *at least* the following
keys:
* **tid**: a unique trial identification object within this Trials instance
usually it is an integer, but it isn't obvious that other sortable,
hashable objects couldn't be used at some point.
* **result**: a sub-dictionary representing what was returned by the fmin
evaluation function. This sub-dictionary has a key 'status' with a value
from `STATUS_STRINGS`; if the status is `STATUS_OK`, then there should be
a 'loss' key as well with a floating-point value. Other special keys in
this sub-dictionary may be used by optimization algorithms (see them
for details). Other keys in this sub-dictionary can be used by the
evaluation function to store miscellaneous diagnostics and debugging
information.
* **misc**: despite generic name, this is currently where the trial's
hyperparameter assignments are stored. This sub-dictionary has two
elements: `'idxs'` and `'vals'`. The `vals` dictionary is
a sub-sub-dictionary mapping each hyperparameter to either `[]` (if the
hyperparameter is inactive in this trial), or `[<val>]` (if the
hyperparameter is active). The `idxs` dictionary is technically
redundant -- it is the same as `vals` but it maps hyperparameter names
to either `[]` or `[<tid>]`.
|
class Trials:
    """Database interface supporting data-driven model-based optimization.
    The model-based optimization algorithms used by hyperopt's fmin function
    work by analyzing samples of a response surface--a history of what points
    in the search space were tested, and what was discovered by those tests.
    A Trials instance stores that history and makes it available to fmin and
    to the various optimization algorithms.
    This class (`base.Trials`) is a pure-Python implementation of the database
    in terms of lists of dictionaries. Subclass `mongoexp.MongoTrials`
    implements the same API in terms of a mongodb database running in another
    process. Other subclasses may be implemented in future.
    The elements of `self.trials` represent all of the completed, in-progress,
    and scheduled evaluation points from an e.g. `fmin` call.
    Each element of `self.trials` is a dictionary with *at least* the following
    keys:
    * **tid**: a unique trial identification object within this Trials instance
      usually it is an integer, but it isn't obvious that other sortable,
      hashable objects couldn't be used at some point.
    * **result**: a sub-dictionary representing what was returned by the fmin
      evaluation function. This sub-dictionary has a key 'status' with a value
      from `STATUS_STRINGS`; if the status is `STATUS_OK`, then there should be
      a 'loss' key as well with a floating-point value. Other special keys in
      this sub-dictionary may be used by optimization algorithms (see them
      for details). Other keys in this sub-dictionary can be used by the
      evaluation function to store miscellaneous diagnostics and debugging
      information.
    * **misc**: despite generic name, this is currently where the trial's
      hyperparameter assignments are stored. This sub-dictionary has two
      elements: `'idxs'` and `'vals'`. The `vals` dictionary is
      a sub-sub-dictionary mapping each hyperparameter to either `[]` (if the
      hyperparameter is inactive in this trial), or `[<val>]` (if the
      hyperparameter is active). The `idxs` dictionary is technically
      redundant -- it is the same as `vals` but it maps hyperparameter names
      to either `[]` or `[<tid>]`.
    """
    # This pure-Python, in-memory implementation is not asynchronous.
    asynchronous = False
    def __init__(self, exp_key=None, refresh=True):
        # _ids: every tid ever issued or seen; _dynamic_trials: the backing
        # store; the filtered _trials view is built by refresh().
        self._ids = set()
        self._dynamic_trials = []
        self._exp_key = exp_key
        self.attachments = {}
        if refresh:
            self.refresh()
    def view(self, exp_key=None, refresh=True):
        # Shallow clone sharing ids/docs/attachments, filtered to exp_key.
        rval = object.__new__(self.__class__)
        rval._exp_key = exp_key
        rval._ids = self._ids
        rval._dynamic_trials = self._dynamic_trials
        rval.attachments = self.attachments
        if refresh:
            rval.refresh()
        return rval
    def aname(self, trial, name):
        # Key under which attachment `name` of `trial` lives in self.attachments.
        return "ATTACH::{}::{}".format(trial["tid"], name)
    def trial_attachments(self, trial):
        """
        Support syntax for load: self.trial_attachments(doc)[name]
        # -- does this work syntactically?
        # (In any event a 2-stage store will work)
        Support syntax for store: self.trial_attachments(doc)[name] = value
        """
        # don't offer more here than in MongoCtrl
        class Attachments:
            # `_self` keeps the enclosing `self`/`trial` closure available.
            def __contains__(_self, name):
                return self.aname(trial, name) in self.attachments
            def __getitem__(_self, name):
                return self.attachments[self.aname(trial, name)]
            def __setitem__(_self, name, value):
                self.attachments[self.aname(trial, name)] = value
            def __delitem__(_self, name):
                del self.attachments[self.aname(trial, name)]
        return Attachments()
    def __iter__(self):
        try:
            return iter(self._trials)
        except AttributeError:
            print("You have to refresh before you iterate", file=sys.stderr)
            raise
    def __len__(self):
        try:
            return len(self._trials)
        except AttributeError:
            print("You have to refresh before you compute len", file=sys.stderr)
            raise
    def __getitem__(self, item):
        # -- how to make it obvious whether indexing is by _trials position
        # or by tid if both are integers?
        raise NotImplementedError("")
    def refresh(self):
        # In MongoTrials, this method fetches from database
        if self._exp_key is None:
            self._trials = [
                tt for tt in self._dynamic_trials if tt["state"] in JOB_VALID_STATES
            ]
        else:
            self._trials = [
                tt
                for tt in self._dynamic_trials
                if (tt["state"] in JOB_VALID_STATES and tt["exp_key"] == self._exp_key)
            ]
        self._ids.update([tt["tid"] for tt in self._trials])
    @property
    def trials(self):
        return self._trials
    @property
    def tids(self):
        return [tt["tid"] for tt in self._trials]
    @property
    def specs(self):
        return [tt["spec"] for tt in self._trials]
    @property
    def results(self):
        return [tt["result"] for tt in self._trials]
    @property
    def miscs(self):
        return [tt["misc"] for tt in self._trials]
    @property
    def idxs_vals(self):
        return miscs_to_idxs_vals(self.miscs)
    @property
    def idxs(self):
        return self.idxs_vals[0]
    @property
    def vals(self):
        return self.idxs_vals[1]
    def assert_valid_trial(self, trial):
        # Structural validation; returns `trial` unchanged or raises.
        if not (hasattr(trial, "keys") and hasattr(trial, "values")):
            raise InvalidTrial("trial should be dict-like", trial)
        for key in TRIAL_KEYS:
            if key not in trial:
                # NOTE(review): the "%s" is never interpolated -- `key` rides
                # along as a second exception argument instead.
                raise InvalidTrial("trial missing key %s", key)
        for key in TRIAL_MISC_KEYS:
            if key not in trial["misc"]:
                raise InvalidTrial('trial["misc"] missing key', key)
        if trial["tid"] != trial["misc"]["tid"]:
            raise InvalidTrial("tid mismatch between root and misc", trial)
        # -- check for SON-encodable
        if have_bson:
            try:
                bson.BSON.encode(trial)
            except:
                # TODO: save the trial object somewhere to inspect, fix, re-insert
                # so that precious data is not simply deallocated and lost.
                print("-" * 80)
                print("CAN'T ENCODE")
                print("-" * 80)
                raise
        if trial["exp_key"] != self._exp_key:
            raise InvalidTrial("wrong exp_key", (trial["exp_key"], self._exp_key))
        # XXX how to assert that tids are unique?
        return trial
    def _insert_trial_docs(self, docs):
        """insert with no error checking"""
        rval = [doc["tid"] for doc in docs]
        self._dynamic_trials.extend(docs)
        return rval
    def insert_trial_doc(self, doc):
        """insert trial after error checking
        Does not refresh. Call self.refresh() for the trial to appear in
        self.specs, self.results, etc.
        """
        doc = self.assert_valid_trial(SONify(doc))
        return self._insert_trial_docs([doc])[0]
        # refreshing could be done fast in this base implementation, but with
        # a real DB the steps should be separated.
    def insert_trial_docs(self, docs):
        """trials - something like is returned by self.new_trial_docs()"""
        docs = [self.assert_valid_trial(SONify(doc)) for doc in docs]
        return self._insert_trial_docs(docs)
    def new_trial_ids(self, n):
        # Fresh consecutive ids starting at the count of ids seen so far.
        aa = len(self._ids)
        rval = list(range(aa, aa + n))
        self._ids.update(rval)
        return rval
    def new_trial_docs(self, tids, specs, results, miscs):
        assert len(tids) == len(specs) == len(results) == len(miscs)
        trials_docs = []
        for tid, spec, result, misc in zip(tids, specs, results, miscs):
            doc = {
                "state": JOB_STATE_NEW,
                "tid": tid,
                "spec": spec,
                "result": result,
                "misc": misc,
                "exp_key": self._exp_key,
                "owner": None,
                "version": 0,
                "book_time": None,
                "refresh_time": None,
            }
            trials_docs.append(doc)
        return trials_docs
    def source_trial_docs(self, tids, specs, results, miscs, sources):
        assert len(tids) == len(specs) == len(results) == len(miscs) == len(sources)
        rval = []
        for tid, spec, result, misc, source in zip(
            tids, specs, results, miscs, sources
        ):
            doc = dict(
                version=0,
                tid=tid,
                spec=spec,
                result=result,
                misc=misc,
                state=source["state"],
                exp_key=source["exp_key"],
                owner=source["owner"],
                book_time=source["book_time"],
                refresh_time=source["refresh_time"],
            )
            # -- ensure that misc has the following fields,
            # some of which may already be set correctly.
            assign = ("tid", tid), ("cmd", None), ("from_tid", source["tid"])
            for k, v in assign:
                assert doc["misc"].setdefault(k, v) == v
            rval.append(doc)
        return rval
    def delete_all(self):
        # Reset to the freshly-constructed state.
        self._dynamic_trials = []
        self.attachments = {}
        self.refresh()
    def count_by_state_synced(self, arg, trials=None):
        """
        Return trial counts by looking at self._trials
        """
        if trials is None:
            trials = self._trials
        if arg in JOB_STATES:
            queue = [doc for doc in trials if doc["state"] == arg]
        elif hasattr(arg, "__iter__"):
            states = set(arg)
            assert all([x in JOB_STATES for x in states])
            queue = [doc for doc in trials if doc["state"] in states]
        else:
            raise TypeError(arg)
        rval = len(queue)
        return rval
    def count_by_state_unsynced(self, arg):
        """
        Return trial counts that count_by_state_synced would return if we
        called refresh() first.
        """
        if self._exp_key is not None:
            exp_trials = [
                tt for tt in self._dynamic_trials if tt["exp_key"] == self._exp_key
            ]
        else:
            exp_trials = self._dynamic_trials
        return self.count_by_state_synced(arg, trials=exp_trials)
    def losses(self, bandit=None):
        if bandit is None:
            return [r.get("loss") for r in self.results]
        return list(map(bandit.loss, self.results, self.specs))
    def statuses(self, bandit=None):
        if bandit is None:
            return [r.get("status") for r in self.results]
        return list(map(bandit.status, self.results, self.specs))
    def average_best_error(self, bandit=None):
        """Return the average best error of the experiment
        Average best error is defined as the average of bandit.true_loss,
        weighted by the probability that the corresponding bandit.loss is best.
        For domains with loss measurement variance of 0, this function simply
        returns the true_loss corresponding to the result with the lowest loss.
        """
        if bandit is None:
            results = self.results
            loss = [r["loss"] for r in results if r["status"] == STATUS_OK]
            loss_v = [
                r.get("loss_variance", 0) for r in results if r["status"] == STATUS_OK
            ]
            true_loss = [
                r.get("true_loss", r["loss"])
                for r in results
                if r["status"] == STATUS_OK
            ]
        else:
            def fmap(f):
                rval = np.asarray(
                    [
                        f(r, s)
                        for (r, s) in zip(self.results, self.specs)
                        if bandit.status(r) == STATUS_OK
                    ]
                ).astype("float")
                if not np.all(np.isfinite(rval)):
                    raise ValueError()
                return rval
            loss = fmap(bandit.loss)
            loss_v = fmap(bandit.loss_variance)
            true_loss = fmap(bandit.true_loss)
        loss3 = list(zip(loss, loss_v, true_loss))
        if not loss3:
            raise ValueError("Empty loss vector")
        loss3.sort()
        loss3 = np.asarray(loss3)
        if np.all(loss3[:, 1] == 0):
            best_idx = np.argmin(loss3[:, 0])
            return loss3[best_idx, 2]
        else:
            cutoff = 0
            sigma = np.sqrt(loss3[0][1])
            while cutoff < len(loss3) and loss3[cutoff][0] < loss3[0][0] + 3 * sigma:
                cutoff += 1
            pmin = pmin_sampled(loss3[:cutoff, 0], loss3[:cutoff, 1])
            avg_true_loss = (pmin * loss3[:cutoff, 2]).sum()
            return avg_true_loss
    @property
    def best_trial(self):
        """
        Trial with lowest non-NaN loss and status=STATUS_OK.
        If no such trial exists, returns None.
        """
        candidates = [
            t
            for t in self.trials
            if t["result"]["status"] == STATUS_OK and not np.isnan(t["result"]["loss"])
        ]
        if not candidates:
            raise AllTrialsFailed
        losses = [float(t["result"]["loss"]) for t in candidates]
        # NOTE(review): unreachable -- candidates (hence losses) is non-empty
        # here, so this never returns None despite the docstring.
        if len(losses) == 0:
            return None
        best = np.nanargmin(losses)
        return candidates[best]
    @property
    def argmin(self):
        best_trial = self.best_trial
        vals = best_trial["misc"]["vals"]
        # unpack the one-element lists to values
        # and skip over the 0-element lists
        rval = {}
        for k, v in list(vals.items()):
            if v:
                rval[k] = v[0]
        return rval
    def fmin(
        self,
        fn,
        space,
        algo=None,
        max_evals=None,
        timeout=None,
        loss_threshold=None,
        max_queue_len=1,
        rstate=None,
        verbose=False,
        pass_expr_memo_ctrl=None,
        catch_eval_exceptions=False,
        return_argmin=True,
        show_progressbar=True,
        early_stop_fn=None,
        trials_save_file="",
    ):
        """Minimize a function over a hyperparameter space.
        For most parameters, see `hyperopt.fmin.fmin`.
        Parameters
        ----------
        catch_eval_exceptions : bool, default False
            If set to True, exceptions raised by either the evaluation of the
            configuration space from hyperparameters or the execution of `fn`
            , will be caught by fmin, and recorded in self._dynamic_trials as
            error jobs (JOB_STATE_ERROR). If set to False, such exceptions
            will not be caught, and so they will propagate to calling code.
        show_progressbar : bool or context manager, default True.
            Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
        """
        # -- Stop-gap implementation!
        # fmin should have been a Trials method in the first place
        # but for now it's still sitting in another file.
        from .fmin import fmin
        return fmin(
            fn,
            space,
            algo=algo,
            max_evals=max_evals,
            timeout=timeout,
            loss_threshold=loss_threshold,
            trials=self,
            rstate=rstate,
            verbose=verbose,
            max_queue_len=max_queue_len,
            allow_trials_fmin=False,  # -- prevent recursion
            pass_expr_memo_ctrl=pass_expr_memo_ctrl,
            catch_eval_exceptions=catch_eval_exceptions,
            return_argmin=return_argmin,
            show_progressbar=show_progressbar,
            early_stop_fn=early_stop_fn,
            trials_save_file=trials_save_file,
        )
|
(exp_key=None, refresh=True)
|
61,644 |
hyperopt.base
|
__init__
| null |
def __init__(self, exp_key=None, refresh=True):
    # _ids: every tid ever issued or seen; _dynamic_trials: the backing
    # store from which refresh() derives the filtered _trials view.
    self._ids = set()
    self._dynamic_trials = []
    self._exp_key = exp_key
    self.attachments = {}
    if refresh:
        self.refresh()
|
(self, exp_key=None, refresh=True)
|
61,653 |
hyperopt.base
|
delete_all
| null |
def delete_all(self):
self._dynamic_trials = []
self.attachments = {}
self.refresh()
|
(self)
|
61,654 |
hyperopt.base
|
fmin
|
Minimize a function over a hyperparameter space.
For most parameters, see `hyperopt.fmin.fmin`.
Parameters
----------
catch_eval_exceptions : bool, default False
If set to True, exceptions raised by either the evaluation of the
configuration space from hyperparameters or the execution of `fn`
, will be caught by fmin, and recorded in self._dynamic_trials as
error jobs (JOB_STATE_ERROR). If set to False, such exceptions
will not be caught, and so they will propagate to calling code.
show_progressbar : bool or context manager, default True.
Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
|
def fmin(
self,
fn,
space,
algo=None,
max_evals=None,
timeout=None,
loss_threshold=None,
max_queue_len=1,
rstate=None,
verbose=False,
pass_expr_memo_ctrl=None,
catch_eval_exceptions=False,
return_argmin=True,
show_progressbar=True,
early_stop_fn=None,
trials_save_file="",
):
"""Minimize a function over a hyperparameter space.
For most parameters, see `hyperopt.fmin.fmin`.
Parameters
----------
catch_eval_exceptions : bool, default False
If set to True, exceptions raised by either the evaluation of the
configuration space from hyperparameters or the execution of `fn`
, will be caught by fmin, and recorded in self._dynamic_trials as
error jobs (JOB_STATE_ERROR). If set to False, such exceptions
will not be caught, and so they will propagate to calling code.
show_progressbar : bool or context manager, default True.
Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
"""
# -- Stop-gap implementation!
# fmin should have been a Trials method in the first place
# but for now it's still sitting in another file.
from .fmin import fmin
return fmin(
fn,
space,
algo=algo,
max_evals=max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
trials=self,
rstate=rstate,
verbose=verbose,
max_queue_len=max_queue_len,
allow_trials_fmin=False, # -- prevent recursion
pass_expr_memo_ctrl=pass_expr_memo_ctrl,
catch_eval_exceptions=catch_eval_exceptions,
return_argmin=return_argmin,
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file=trials_save_file,
)
|
(self, fn, space, algo=None, max_evals=None, timeout=None, loss_threshold=None, max_queue_len=1, rstate=None, verbose=False, pass_expr_memo_ctrl=None, catch_eval_exceptions=False, return_argmin=True, show_progressbar=True, early_stop_fn=None, trials_save_file='')
|
61,663 |
hyperopt.base
|
trial_attachments
|
Support syntax for load: self.trial_attachments(doc)[name]
# -- does this work syntactically?
# (In any event a 2-stage store will work)
Support syntax for store: self.trial_attachments(doc)[name] = value
|
def trial_attachments(self, trial):
"""
Support syntax for load: self.trial_attachments(doc)[name]
# -- does this work syntactically?
# (In any event a 2-stage store will work)
Support syntax for store: self.trial_attachments(doc)[name] = value
"""
# don't offer more here than in MongoCtrl
class Attachments:
def __contains__(_self, name):
return self.aname(trial, name) in self.attachments
def __getitem__(_self, name):
return self.attachments[self.aname(trial, name)]
def __setitem__(_self, name, value):
self.attachments[self.aname(trial, name)] = value
def __delitem__(_self, name):
del self.attachments[self.aname(trial, name)]
return Attachments()
|
(self, trial)
|
61,670 |
hyperopt.fmin
|
fmin
|
Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
`trials`
Parameters
----------
fn : callable (trial point -> loss)
This function will be called with a value generated from `space`
as the first and possibly only argument. It can return either
a scalar-valued loss, or a dictionary. A returned dictionary must
contain a 'status' key with a value from `STATUS_STRINGS`, must
contain a 'loss' key if the status is `STATUS_OK`. Particular
optimization algorithms may look for other keys as well. An
optional sub-dictionary associated with an 'attachments' key will
be removed by fmin its contents will be available via
`trials.trial_attachments`. The rest (usually all) of the returned
dictionary will be stored and available later as some 'result'
sub-dictionary within `trials.trials`.
space : hyperopt.pyll.Apply node or "annotated"
The set of possible arguments to `fn` is the set of objects
that could be created with non-zero probability by drawing randomly
from this stochastic program involving involving hp_<xxx> nodes
(see `hyperopt.hp` and `hyperopt.pyll_utils`).
If set to "annotated", will read space using type hint in fn. Ex:
(`def fn(x: hp.uniform("x", -1, 1)): return x`)
algo : search algorithm
This object, such as `hyperopt.rand.suggest` and
`hyperopt.tpe.suggest` provides logic for sequential search of the
hyperparameter space.
max_evals : int
Allow up to this many function evaluations before returning.
timeout : None or int, default None
Limits search time by parametrized number of seconds.
If None, then the search process has no time constraint.
loss_threshold : None or double, default None
Limits search time when minimal loss reduced to certain amount.
If None, then the search process has no constraint on the loss,
and will stop based on other parameters, e.g. `max_evals`, `timeout`
trials : None or base.Trials (or subclass)
Storage for completed, ongoing, and scheduled evaluation points. If
None, then a temporary `base.Trials` instance will be created. If
a trials object, then that trials object will be affected by
side-effect of this call.
rstate : numpy.random.Generator, default numpy.random or `$HYPEROPT_FMIN_SEED`
Each call to `algo` requires a seed value, which should be different
on each call. This object is used to draw these seeds via `randint`.
The default rstate is
`numpy.random.default_rng(int(env['HYPEROPT_FMIN_SEED']))`
if the `HYPEROPT_FMIN_SEED` environment variable is set to a non-empty
string, otherwise np.random is used in whatever state it is in.
verbose : bool
Print out some information to stdout during search. If False, disable
progress bar irrespectively of show_progressbar argument
allow_trials_fmin : bool, default True
If the `trials` argument
pass_expr_memo_ctrl : bool, default False
If set to True, `fn` will be called in a different more low-level
way: it will receive raw hyperparameters, a partially-populated
`memo`, and a Ctrl object for communication with this Trials
object.
return_argmin : bool, default True
If set to False, this function returns nothing, which can be useful
for example if it is expected that `len(trials)` may be zero after
fmin, and therefore `trials.argmin` would be undefined.
points_to_evaluate : list, default None
Only works if trials=None. If points_to_evaluate equals None then the
trials are evaluated normally. If list of dicts is passed then
given points are evaluated before optimisation starts, so the overall
number of optimisation steps is len(points_to_evaluate) + max_evals.
Elements of this list must be in a form of a dictionary with variable
names as keys and variable values as dict values. Example
points_to_evaluate value is [{'x': 0.0, 'y': 0.0}, {'x': 1.0, 'y': 2.0}]
max_queue_len : integer, default 1
Sets the queue length generated in the dictionary or trials. Increasing this
value helps to slightly speed up parallel simulatulations which sometimes lag
on suggesting a new trial.
show_progressbar : bool or context manager, default True (or False is verbose is False).
Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
early_stop_fn: callable ((result, *args) -> (Boolean, *args)).
Called after every run with the result of the run and the values returned by the function previously.
Stop the search if the function return true.
Default None.
trials_save_file: str, default ""
Optional file name to save the trials object to every iteration.
If specified and the file already exists, will load from this file when
trials=None instead of creating a new base.Trials object
Returns
-------
argmin : dictionary
If return_argmin is True returns `trials.argmin` which is a dictionary. Otherwise
this function returns the result of `hyperopt.space_eval(space, trails.argmin)` if there
were successfull trails. This object shares the same structure as the space passed.
If there were no successfull trails, it returns None.
|
def fmin(
fn,
space,
algo=None,
max_evals=None,
timeout=None,
loss_threshold=None,
trials=None,
rstate=None,
allow_trials_fmin=True,
pass_expr_memo_ctrl=None,
catch_eval_exceptions=False,
verbose=True,
return_argmin=True,
points_to_evaluate=None,
max_queue_len=1,
show_progressbar=True,
early_stop_fn=None,
trials_save_file="",
):
"""Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
`trials`
Parameters
----------
fn : callable (trial point -> loss)
This function will be called with a value generated from `space`
as the first and possibly only argument. It can return either
a scalar-valued loss, or a dictionary. A returned dictionary must
contain a 'status' key with a value from `STATUS_STRINGS`, must
contain a 'loss' key if the status is `STATUS_OK`. Particular
optimization algorithms may look for other keys as well. An
optional sub-dictionary associated with an 'attachments' key will
be removed by fmin its contents will be available via
`trials.trial_attachments`. The rest (usually all) of the returned
dictionary will be stored and available later as some 'result'
sub-dictionary within `trials.trials`.
space : hyperopt.pyll.Apply node or "annotated"
The set of possible arguments to `fn` is the set of objects
that could be created with non-zero probability by drawing randomly
from this stochastic program involving involving hp_<xxx> nodes
(see `hyperopt.hp` and `hyperopt.pyll_utils`).
If set to "annotated", will read space using type hint in fn. Ex:
(`def fn(x: hp.uniform("x", -1, 1)): return x`)
algo : search algorithm
This object, such as `hyperopt.rand.suggest` and
`hyperopt.tpe.suggest` provides logic for sequential search of the
hyperparameter space.
max_evals : int
Allow up to this many function evaluations before returning.
timeout : None or int, default None
Limits search time by parametrized number of seconds.
If None, then the search process has no time constraint.
loss_threshold : None or double, default None
Limits search time when minimal loss reduced to certain amount.
If None, then the search process has no constraint on the loss,
and will stop based on other parameters, e.g. `max_evals`, `timeout`
trials : None or base.Trials (or subclass)
Storage for completed, ongoing, and scheduled evaluation points. If
None, then a temporary `base.Trials` instance will be created. If
a trials object, then that trials object will be affected by
side-effect of this call.
rstate : numpy.random.Generator, default numpy.random or `$HYPEROPT_FMIN_SEED`
Each call to `algo` requires a seed value, which should be different
on each call. This object is used to draw these seeds via `randint`.
The default rstate is
`numpy.random.default_rng(int(env['HYPEROPT_FMIN_SEED']))`
if the `HYPEROPT_FMIN_SEED` environment variable is set to a non-empty
string, otherwise np.random is used in whatever state it is in.
verbose : bool
Print out some information to stdout during search. If False, disable
progress bar irrespectively of show_progressbar argument
allow_trials_fmin : bool, default True
If the `trials` argument
pass_expr_memo_ctrl : bool, default False
If set to True, `fn` will be called in a different more low-level
way: it will receive raw hyperparameters, a partially-populated
`memo`, and a Ctrl object for communication with this Trials
object.
return_argmin : bool, default True
If set to False, this function returns nothing, which can be useful
for example if it is expected that `len(trials)` may be zero after
fmin, and therefore `trials.argmin` would be undefined.
points_to_evaluate : list, default None
Only works if trials=None. If points_to_evaluate equals None then the
trials are evaluated normally. If list of dicts is passed then
given points are evaluated before optimisation starts, so the overall
number of optimisation steps is len(points_to_evaluate) + max_evals.
Elements of this list must be in a form of a dictionary with variable
names as keys and variable values as dict values. Example
points_to_evaluate value is [{'x': 0.0, 'y': 0.0}, {'x': 1.0, 'y': 2.0}]
max_queue_len : integer, default 1
Sets the queue length generated in the dictionary or trials. Increasing this
value helps to slightly speed up parallel simulatulations which sometimes lag
on suggesting a new trial.
show_progressbar : bool or context manager, default True (or False is verbose is False).
Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
early_stop_fn: callable ((result, *args) -> (Boolean, *args)).
Called after every run with the result of the run and the values returned by the function previously.
Stop the search if the function return true.
Default None.
trials_save_file: str, default ""
Optional file name to save the trials object to every iteration.
If specified and the file already exists, will load from this file when
trials=None instead of creating a new base.Trials object
Returns
-------
argmin : dictionary
If return_argmin is True returns `trials.argmin` which is a dictionary. Otherwise
this function returns the result of `hyperopt.space_eval(space, trails.argmin)` if there
were successfull trails. This object shares the same structure as the space passed.
If there were no successfull trails, it returns None.
"""
if algo is None:
algo = tpe.suggest
logger.warning("TPE is being used as the default algorithm.")
if max_evals is None:
max_evals = sys.maxsize
if rstate is None:
env_rseed = os.environ.get("HYPEROPT_FMIN_SEED", "")
if env_rseed:
rstate = np.random.default_rng(int(env_rseed))
else:
rstate = np.random.default_rng()
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
if space == "annotated":
# Read space from objective fn
space = inspect.getfullargspec(fn).annotations
# Validate space
for param, hp_func in space.items():
if not isinstance(hp_func, pyll.base.Apply):
raise exceptions.InvalidAnnotatedParameter(
'When using `space="annotated"`, please annotate the '
"objective function arguments with a `pyll.base.Apply` "
"subclass. See example in `fmin` docstring"
)
# Change fn to accept a dict-like argument
fn = __objective_fmin_wrapper(fn)
if allow_trials_fmin and hasattr(trials, "fmin"):
return trials.fmin(
fn,
space,
algo=algo,
max_evals=max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
rstate=rstate,
pass_expr_memo_ctrl=pass_expr_memo_ctrl,
verbose=verbose,
catch_eval_exceptions=catch_eval_exceptions,
return_argmin=return_argmin,
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file=trials_save_file,
)
if trials is None:
if os.path.exists(trials_save_file):
trials = pickler.load(open(trials_save_file, "rb"))
elif points_to_evaluate is None:
trials = base.Trials()
else:
assert type(points_to_evaluate) == list
trials = generate_trials_to_calculate(points_to_evaluate)
domain = base.Domain(fn, space, pass_expr_memo_ctrl=pass_expr_memo_ctrl)
rval = FMinIter(
algo,
domain,
trials,
max_evals=max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
rstate=rstate,
verbose=verbose,
max_queue_len=max_queue_len,
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file=trials_save_file,
)
rval.catch_eval_exceptions = catch_eval_exceptions
# next line is where the fmin is actually executed
rval.exhaust()
if return_argmin:
if len(trials.trials) == 0:
raise Exception(
"There are no evaluation tasks, cannot return argmin of task losses."
)
return trials.argmin
if len(trials) > 0:
# Only if there are some successful trail runs, return the best point in
# the evaluation space
return space_eval(space, trials.argmin)
return None
|
(fn, space, algo=None, max_evals=None, timeout=None, loss_threshold=None, trials=None, rstate=None, allow_trials_fmin=True, pass_expr_memo_ctrl=None, catch_eval_exceptions=False, verbose=True, return_argmin=True, points_to_evaluate=None, max_queue_len=1, show_progressbar=True, early_stop_fn=None, trials_save_file='')
|
61,671 |
hyperopt.fmin
|
fmin_pass_expr_memo_ctrl
|
Mark a function as expecting kwargs 'expr', 'memo' and 'ctrl' from
hyperopt.fmin.
expr - the pyll expression of the search space
memo - a partially-filled memo dictionary such that
`rec_eval(expr, memo=memo)` will build the proposed trial point.
ctrl - the Experiment control object (see base.Ctrl)
|
def fmin_pass_expr_memo_ctrl(f):
"""
Mark a function as expecting kwargs 'expr', 'memo' and 'ctrl' from
hyperopt.fmin.
expr - the pyll expression of the search space
memo - a partially-filled memo dictionary such that
`rec_eval(expr, memo=memo)` will build the proposed trial point.
ctrl - the Experiment control object (see base.Ctrl)
"""
f.fmin_pass_expr_memo_ctrl = True
return f
|
(f)
|
61,674 |
hyperopt.fmin
|
partial
|
functools.partial work-alike for functions decorated with
fmin_pass_expr_memo_ctrl
|
def partial(fn, **kwargs):
"""functools.partial work-alike for functions decorated with
fmin_pass_expr_memo_ctrl
"""
rval = functools.partial(fn, **kwargs)
if hasattr(fn, "fmin_pass_expr_memo_ctrl"):
rval.fmin_pass_expr_memo_ctrl = fn.fmin_pass_expr_memo_ctrl
return rval
|
(fn, **kwargs)
|
61,679 |
hyperopt.fmin
|
space_eval
|
Compute a point in a search space from a hyperparameter assignment.
Parameters:
-----------
space - a pyll graph involving hp nodes (see `pyll_utils`).
hp_assignment - a dictionary mapping hp node labels to values.
|
def space_eval(space, hp_assignment):
"""Compute a point in a search space from a hyperparameter assignment.
Parameters:
-----------
space - a pyll graph involving hp nodes (see `pyll_utils`).
hp_assignment - a dictionary mapping hp node labels to values.
"""
space = pyll.as_apply(space)
nodes = pyll.toposort(space)
memo = {}
for node in nodes:
if node.name == "hyperopt_param":
label = node.arg["label"].eval()
if label in hp_assignment:
memo[node] = hp_assignment[label]
rval = pyll.rec_eval(space, memo=memo)
return rval
|
(space, hp_assignment)
|
61,683 |
hyperopt.base
|
trials_from_docs
|
Construct a Trials base class instance from a list of trials documents
|
def trials_from_docs(docs, validate=True, **kwargs):
"""Construct a Trials base class instance from a list of trials documents"""
rval = Trials(**kwargs)
if validate:
rval.insert_trial_docs(docs)
else:
rval._insert_trial_docs(docs)
rval.refresh()
return rval
|
(docs, validate=True, **kwargs)
|
61,686 |
visaplan.plone.ajaxnavigation
|
AjaxnavTypeError
|
Some visaplan.plone.ajaxnavigation component has been used wrongly
|
class AjaxnavTypeError(TypeError, Error):
"""
Some visaplan.plone.ajaxnavigation component has been used wrongly
"""
| null |
61,687 |
visaplan.plone.ajaxnavigation
|
Error
|
Root exception class for the visaplan.plone.ajaxnavigation package
|
class Error(Exception):
"""
Root exception class for the visaplan.plone.ajaxnavigation package
"""
| null |
61,688 |
zope.i18nmessageid.message
|
MessageFactory
|
Factory for creating i18n messages.
|
class MessageFactory:
"""Factory for creating i18n messages."""
def __init__(self, domain):
self._domain = domain
def __call__(self, ustr, default=None, mapping=None,
msgid_plural=None, default_plural=None, number=None):
return Message(ustr, self._domain, default, mapping,
msgid_plural, default_plural, number)
|
(domain)
|
61,689 |
zope.i18nmessageid.message
|
__call__
| null |
def __call__(self, ustr, default=None, mapping=None,
msgid_plural=None, default_plural=None, number=None):
return Message(ustr, self._domain, default, mapping,
msgid_plural, default_plural, number)
|
(self, ustr, default=None, mapping=None, msgid_plural=None, default_plural=None, number=None)
|
61,690 |
zope.i18nmessageid.message
|
__init__
| null |
def __init__(self, domain):
self._domain = domain
|
(self, domain)
|
61,691 |
visaplan.plone.ajaxnavigation
|
TemplateNotFound
| null |
class TemplateNotFound(Error):
pass
| null |
61,692 |
visaplan.plone.ajaxnavigation
|
ToolNotFound
| null |
class ToolNotFound(Error):
pass
| null |
61,693 |
eval_type_backport.eval_type_backport
|
ForwardRef
|
Like `typing.ForwardRef`, but lets older Python versions use newer typing features.
Specifically, when evaluated, this transforms `X | Y` into `typing.Union[X, Y]`
and `list[X]` into `typing.List[X]` etc. (for all the types made generic in PEP 585)
if the original syntax is not supported in the current Python version.
|
class ForwardRef(typing.ForwardRef, _root=True): # type: ignore[call-arg,misc]
"""
Like `typing.ForwardRef`, but lets older Python versions use newer typing features.
Specifically, when evaluated, this transforms `X | Y` into `typing.Union[X, Y]`
and `list[X]` into `typing.List[X]` etc. (for all the types made generic in PEP 585)
if the original syntax is not supported in the current Python version.
"""
def _evaluate(
self,
globalns: dict[str, Any] | None,
localns: dict[str, Any] | None,
recursive_guard: frozenset[str] | None = None,
) -> Any:
try:
return super()._evaluate(
globalns,
localns,
# assume `recursive_guard` is provided by typing,
# so if it's not None, it is supported in typing.ForwardRef
*() if recursive_guard is None else (recursive_guard,),
)
except TypeError as e:
if not is_backport_fixable_error(e):
raise
return _eval_direct(self, globalns, localns)
|
(arg, is_argument=True, module=None, *, is_class=False)
|
61,698 |
eval_type_backport.eval_type_backport
|
_evaluate
| null |
def _evaluate(
self,
globalns: dict[str, Any] | None,
localns: dict[str, Any] | None,
recursive_guard: frozenset[str] | None = None,
) -> Any:
try:
return super()._evaluate(
globalns,
localns,
# assume `recursive_guard` is provided by typing,
# so if it's not None, it is supported in typing.ForwardRef
*() if recursive_guard is None else (recursive_guard,),
)
except TypeError as e:
if not is_backport_fixable_error(e):
raise
return _eval_direct(self, globalns, localns)
|
(self, globalns: dict[str, typing.Any] | None, localns: dict[str, typing.Any] | None, recursive_guard: Optional[frozenset[str]] = None) -> Any
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.