index | package | name | docstring | code | signature |
---|---|---|---|---|---|
54,246 |
nbconvert.exporters.exporter
|
from_notebook_node
|
Convert a notebook from a notebook node instance.
Parameters
----------
nb : :class:`~nbformat.NotebookNode`
Notebook node (dict-like with attr-access)
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
`**kw`
Ignored
|
def from_notebook_node(
self, nb: NotebookNode, resources: t.Any | None = None, **kw: t.Any
) -> tuple[NotebookNode, dict[str, t.Any]]:
"""
Convert a notebook from a notebook node instance.
Parameters
----------
nb : :class:`~nbformat.NotebookNode`
Notebook node (dict-like with attr-access)
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
`**kw`
Ignored
"""
nb_copy = copy.deepcopy(nb)
resources = self._init_resources(resources)
if "language" in nb["metadata"]:
resources["language"] = nb["metadata"]["language"].lower()
# Preprocess
nb_copy, resources = self._preprocess(nb_copy, resources)
notebook_name = ""
if resources is not None:
name = resources.get("metadata", {}).get("name", "")
path = resources.get("metadata", {}).get("path", "")
notebook_name = os.path.join(path, name)
self._nb_metadata[notebook_name] = nb_copy.metadata
return nb_copy, resources
|
(self, nb: nbformat.notebooknode.NotebookNode, resources: Optional[Any] = None, **kw: Any) -> tuple[nbformat.notebooknode.NotebookNode, dict[str, typing.Any]]
|
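A minimal usage sketch for the base `Exporter.from_notebook_node` shown above; the notebook path and the `metadata` values are hypothetical and only illustrate how `resources` feeds the name/path bookkeeping in the method body:

```python
# Sketch only: "example.ipynb", "example" and "notebooks" are made-up values.
import nbformat
from nbconvert.exporters.exporter import Exporter

nb = nbformat.read("example.ipynb", as_version=4)
resources = {"metadata": {"name": "example", "path": "notebooks"}}

exporter = Exporter()
nb_copy, resources = exporter.from_notebook_node(nb, resources=resources)
# nb_copy is a deep copy run through any configured preprocessors; its metadata
# is cached under os.path.join(path, name), i.e. "notebooks/example" here.
```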
54,264 |
nbconvert.exporters.base
|
ExporterNameError
|
An exporter name error.
|
class ExporterNameError(NameError):
"""An exporter name error."""
| null |
54,265 |
nbconvert.exporters.exporter
|
FilenameExtension
|
A trait for filename extensions.
|
class FilenameExtension(Unicode): # type:ignore[type-arg]
"""A trait for filename extensions."""
default_value = ""
info_text = "a filename extension, beginning with a dot"
def validate(self, obj, value):
"""Validate the file name."""
# cast to proper unicode
value = super().validate(obj, value)
# check that it starts with a dot
if value and not value.startswith("."):
msg = "FileExtension trait '{}' does not begin with a dot: {!r}"
raise TraitError(msg.format(self.name, value))
return value
|
(default_value: Any = traitlets.Undefined, allow_none: bool = False, read_only: bool = None, help: 'str | None' = None, config: 't.Any' = None, **kwargs: 't.Any') -> 'None'
|
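A small sketch of how the `FilenameExtension` trait behaves when assigned; the `Demo` class is hypothetical and exists only to exercise the validator:

```python
# Hypothetical host class for the trait; only the validation behaviour matters here.
from traitlets import HasTraits, TraitError
from nbconvert.exporters.exporter import FilenameExtension

class Demo(HasTraits):
    file_extension = FilenameExtension()

d = Demo()
d.file_extension = ".html"       # accepted: starts with a dot
try:
    d.file_extension = "html"    # rejected: no leading dot
except TraitError as err:
    print(err)
```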
54,287 |
nbconvert.exporters.exporter
|
validate
|
Validate the file name.
|
def validate(self, obj, value):
"""Validate the file name."""
# cast to proper unicode
value = super().validate(obj, value)
# check that it starts with a dot
if value and not value.startswith("."):
msg = "FileExtension trait '{}' does not begin with a dot: {!r}"
raise TraitError(msg.format(self.name, value))
return value
|
(self, obj, value)
|
54,288 |
nbconvert.exporters.html
|
HTMLExporter
|
Exports a basic HTML document. This exporter assists with the export of
HTML. Inherit from it if you are writing your own HTML template and need
custom preprocessors/filters. If you don't need custom preprocessors/
filters, just change the 'template_file' config option.
|
class HTMLExporter(TemplateExporter):
"""
Exports a basic HTML document. This exporter assists with the export of
HTML. Inherit from it if you are writing your own HTML template and need
custom preprocessors/filters. If you don't need custom preprocessors/
filters, just change the 'template_file' config option.
"""
export_from_notebook = "HTML"
anchor_link_text = Unicode("¶", help="The text used as the text for anchor links.").tag(
config=True
)
exclude_anchor_links = Bool(False, help="If anchor links should be included or not.").tag(
config=True
)
require_js_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js",
help="""
URL to load require.js from.
Defaults to loading from cdnjs.
""",
).tag(config=True)
mathjax_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML-full,Safe",
help="""
URL to load Mathjax from.
Defaults to loading from cdnjs.
""",
).tag(config=True)
mermaid_js_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/mermaid/10.7.0/mermaid.esm.min.mjs",
help="""
URL to load MermaidJS from.
Defaults to loading from cdnjs.
""",
)
jquery_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js",
help="""
URL to load jQuery from.
Defaults to loading from cdnjs.
""",
).tag(config=True)
jupyter_widgets_base_url = Unicode(
"https://unpkg.com/", help="URL base for Jupyter widgets"
).tag(config=True)
widget_renderer_url = Unicode("", help="Full URL for Jupyter widgets").tag(config=True)
html_manager_semver_range = Unicode(
"*", help="Semver range for Jupyter widgets HTML manager"
).tag(config=True)
@default("file_extension")
def _file_extension_default(self):
return ".html"
@default("template_name")
def _template_name_default(self):
return "lab"
theme = Unicode(
"light",
help="Template specific theme(e.g. the name of a JupyterLab CSS theme distributed as prebuilt extension for the lab template)",
).tag(config=True)
sanitize_html = Bool(
False,
help=(
"Whether the HTML in Markdown cells and cell outputs should be sanitized."
"This should be set to True by nbviewer or similar tools."
),
).tag(config=True)
skip_svg_encoding = Bool(
False,
help=("Whether the svg to image data attribute encoding should occur"),
).tag(config=True)
embed_images = Bool(
False, help="Whether or not to embed images as base64 in markdown cells."
).tag(config=True)
output_mimetype = "text/html"
@property
def default_config(self):
c = Config(
{
"NbConvertBase": {
"display_data_priority": [
"application/vnd.jupyter.widget-view+json",
"application/javascript",
"text/html",
"text/markdown",
"image/svg+xml",
"text/vnd.mermaid",
"text/latex",
"image/png",
"image/jpeg",
"text/plain",
]
},
"HighlightMagicsPreprocessor": {"enabled": True},
}
)
if super().default_config:
c2 = super().default_config.copy()
c2.merge(c)
c = c2
return c
language_code = Unicode(
"en", help="Language code of the content, should be one of the ISO639-1"
).tag(config=True)
@validate("language_code")
def _valid_language_code(self, proposal):
if self.language_code not in iso639_1:
self.log.warning(
'"%s" is not an ISO 639-1 language code. '
'It has been replaced by the default value "en".',
self.language_code,
)
return proposal["trait"].default_value
return proposal["value"]
@contextfilter
def markdown2html(self, context, source):
"""Markdown to HTML filter respecting the anchor_link_text setting"""
cell = context.get("cell", {})
attachments = cell.get("attachments", {})
path = context.get("resources", {}).get("metadata", {}).get("path", "")
renderer = IPythonRenderer(
escape=False,
attachments=attachments,
embed_images=self.embed_images,
path=path,
anchor_link_text=self.anchor_link_text,
exclude_anchor_links=self.exclude_anchor_links,
)
return MarkdownWithMath(renderer=renderer).render(source)
def default_filters(self):
"""Get the default filters."""
yield from super().default_filters()
yield ("markdown2html", self.markdown2html)
def from_notebook_node( # type:ignore[explicit-override, override]
self, nb: NotebookNode, resources: Optional[Dict[str, Any]] = None, **kw: Any
) -> Tuple[str, Dict[str, Any]]:
"""Convert from notebook node."""
langinfo = nb.metadata.get("language_info", {})
lexer = langinfo.get("pygments_lexer", langinfo.get("name", None))
highlight_code = self.filters.get(
"highlight_code", Highlight2HTML(pygments_lexer=lexer, parent=self)
)
resources = self._init_resources(resources)
filter_data_type = WidgetsDataTypeFilter(
notebook_metadata=self._nb_metadata, parent=self, resources=resources
)
self.register_filter("highlight_code", highlight_code)
self.register_filter("filter_data_type", filter_data_type)
html, resources = super().from_notebook_node(nb, resources, **kw)
soup = BeautifulSoup(html, features="html.parser")
# Add image's alternative text
missing_alt = 0
for elem in soup.select("img:not([alt])"):
elem.attrs["alt"] = "No description has been provided for this image"
missing_alt += 1
if missing_alt:
self.log.warning("Alternative text is missing on %s image(s).", missing_alt)
# Set input and output focusable
for elem in soup.select(".jp-Notebook div.jp-Cell-inputWrapper"):
elem.attrs["tabindex"] = "0"
for elem in soup.select(".jp-Notebook div.jp-OutputArea-output"):
elem.attrs["tabindex"] = "0"
return str(soup), resources
def _init_resources(self, resources):
def resources_include_css(name):
env = self.environment
code = """<style type="text/css">\n%s</style>""" % (env.loader.get_source(env, name)[0])
return markupsafe.Markup(code)
def resources_include_lab_theme(name):
# Try to find the theme with the given name, looking through the labextensions
_, theme_path = find_lab_theme(name)
with open(theme_path / "index.css") as file:
data = file.read()
# Embed assets (fonts, images...)
for asset in os.listdir(theme_path):
local_url = f"url({Path(asset).as_posix()})"
if local_url in data:
mime_type = mimetypes.guess_type(asset)[0]
# Replace asset url by a base64 dataurl
with open(theme_path / asset, "rb") as assetfile:
base64_data = base64.b64encode(assetfile.read())
base64_str = base64_data.replace(b"\n", b"").decode("ascii")
data = data.replace(local_url, f"url(data:{mime_type};base64,{base64_str})")
code = """<style type="text/css">\n%s</style>""" % data
return markupsafe.Markup(code)
def resources_include_js(name, module=False):
"""Get the resources include JS for a name. If module=True, import as ES module"""
env = self.environment
code = f"""<script {'type="module"' if module else ""}>\n{env.loader.get_source(env, name)[0]}</script>"""
return markupsafe.Markup(code)
def resources_include_url(name):
"""Get the resources include url for a name."""
env = self.environment
mime_type, encoding = mimetypes.guess_type(name)
try:
# we try to load via the jinja loader, but that tries to load
# as (encoded) text
data = env.loader.get_source(env, name)[0].encode("utf8")
except UnicodeDecodeError:
# if that fails (for instance a binary file, png or ttf)
# we mimic jinja2
pieces = split_template_path(name)
for searchpath in self.template_paths:
filename = os.path.join(searchpath, *pieces)
if os.path.exists(filename):
with open(filename, "rb") as f:
data = f.read()
break
else:
msg = f"No file {name!r} found in {searchpath!r}"
raise ValueError(msg)
data = base64.b64encode(data)
data = data.replace(b"\n", b"").decode("ascii")
src = f"data:{mime_type};base64,{data}"
return markupsafe.Markup(src)
resources = super()._init_resources(resources)
resources["theme"] = self.theme
resources["include_css"] = resources_include_css
resources["include_lab_theme"] = resources_include_lab_theme
resources["include_js"] = resources_include_js
resources["include_url"] = resources_include_url
resources["require_js_url"] = self.require_js_url
resources["mathjax_url"] = self.mathjax_url
resources["mermaid_js_url"] = self.mermaid_js_url
resources["jquery_url"] = self.jquery_url
resources["jupyter_widgets_base_url"] = self.jupyter_widgets_base_url
resources["widget_renderer_url"] = self.widget_renderer_url
resources["html_manager_semver_range"] = self.html_manager_semver_range
resources["should_sanitize_html"] = self.sanitize_html
resources["language_code"] = self.language_code
resources["should_not_encode_svg"] = self.skip_svg_encoding
return resources
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
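A usage sketch for `HTMLExporter`, assuming a local `example.ipynb` (hypothetical); the `theme` and `embed_images` options are the traits listed above:

```python
# Sketch: configure a couple of the traits shown above and write the HTML body.
import nbformat
from traitlets.config import Config
from nbconvert import HTMLExporter

c = Config()
c.HTMLExporter.theme = "dark"
c.HTMLExporter.embed_images = True

nb = nbformat.read("example.ipynb", as_version=4)
body, resources = HTMLExporter(config=c).from_notebook_node(nb)

with open("example.html", "w", encoding="utf-8") as f:
    f.write(body)
```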
54,300 |
nbconvert.exporters.html
|
_init_resources
| null |
def _init_resources(self, resources):
def resources_include_css(name):
env = self.environment
code = """<style type="text/css">\n%s</style>""" % (env.loader.get_source(env, name)[0])
return markupsafe.Markup(code)
def resources_include_lab_theme(name):
# Try to find the theme with the given name, looking through the labextensions
_, theme_path = find_lab_theme(name)
with open(theme_path / "index.css") as file:
data = file.read()
# Embed assets (fonts, images...)
for asset in os.listdir(theme_path):
local_url = f"url({Path(asset).as_posix()})"
if local_url in data:
mime_type = mimetypes.guess_type(asset)[0]
# Replace asset url by a base64 dataurl
with open(theme_path / asset, "rb") as assetfile:
base64_data = base64.b64encode(assetfile.read())
base64_str = base64_data.replace(b"\n", b"").decode("ascii")
data = data.replace(local_url, f"url(data:{mime_type};base64,{base64_str})")
code = """<style type="text/css">\n%s</style>""" % data
return markupsafe.Markup(code)
def resources_include_js(name, module=False):
"""Get the resources include JS for a name. If module=True, import as ES module"""
env = self.environment
code = f"""<script {'type="module"' if module else ""}>\n{env.loader.get_source(env, name)[0]}</script>"""
return markupsafe.Markup(code)
def resources_include_url(name):
"""Get the resources include url for a name."""
env = self.environment
mime_type, encoding = mimetypes.guess_type(name)
try:
# we try to load via the jinja loader, but that tries to load
# as (encoded) text
data = env.loader.get_source(env, name)[0].encode("utf8")
except UnicodeDecodeError:
# if that fails (for instance a binary file, png or ttf)
# we mimic jinja2
pieces = split_template_path(name)
for searchpath in self.template_paths:
filename = os.path.join(searchpath, *pieces)
if os.path.exists(filename):
with open(filename, "rb") as f:
data = f.read()
break
else:
msg = f"No file {name!r} found in {searchpath!r}"
raise ValueError(msg)
data = base64.b64encode(data)
data = data.replace(b"\n", b"").decode("ascii")
src = f"data:{mime_type};base64,{data}"
return markupsafe.Markup(src)
resources = super()._init_resources(resources)
resources["theme"] = self.theme
resources["include_css"] = resources_include_css
resources["include_lab_theme"] = resources_include_lab_theme
resources["include_js"] = resources_include_js
resources["include_url"] = resources_include_url
resources["require_js_url"] = self.require_js_url
resources["mathjax_url"] = self.mathjax_url
resources["mermaid_js_url"] = self.mermaid_js_url
resources["jquery_url"] = self.jquery_url
resources["jupyter_widgets_base_url"] = self.jupyter_widgets_base_url
resources["widget_renderer_url"] = self.widget_renderer_url
resources["html_manager_semver_range"] = self.html_manager_semver_range
resources["should_sanitize_html"] = self.sanitize_html
resources["language_code"] = self.language_code
resources["should_not_encode_svg"] = self.skip_svg_encoding
return resources
|
(self, resources)
|
54,313 |
nbconvert.exporters.html
|
default_filters
|
Get the default filters.
|
def default_filters(self):
"""Get the default filters."""
yield from super().default_filters()
yield ("markdown2html", self.markdown2html)
|
(self)
|
54,316 |
nbconvert.exporters.html
|
from_notebook_node
|
Convert from notebook node.
|
def from_notebook_node( # type:ignore[explicit-override, override]
self, nb: NotebookNode, resources: Optional[Dict[str, Any]] = None, **kw: Any
) -> Tuple[str, Dict[str, Any]]:
"""Convert from notebook node."""
langinfo = nb.metadata.get("language_info", {})
lexer = langinfo.get("pygments_lexer", langinfo.get("name", None))
highlight_code = self.filters.get(
"highlight_code", Highlight2HTML(pygments_lexer=lexer, parent=self)
)
resources = self._init_resources(resources)
filter_data_type = WidgetsDataTypeFilter(
notebook_metadata=self._nb_metadata, parent=self, resources=resources
)
self.register_filter("highlight_code", highlight_code)
self.register_filter("filter_data_type", filter_data_type)
html, resources = super().from_notebook_node(nb, resources, **kw)
soup = BeautifulSoup(html, features="html.parser")
# Add image's alternative text
missing_alt = 0
for elem in soup.select("img:not([alt])"):
elem.attrs["alt"] = "No description has been provided for this image"
missing_alt += 1
if missing_alt:
self.log.warning("Alternative text is missing on %s image(s).", missing_alt)
# Set input and output focusable
for elem in soup.select(".jp-Notebook div.jp-Cell-inputWrapper"):
elem.attrs["tabindex"] = "0"
for elem in soup.select(".jp-Notebook div.jp-OutputArea-output"):
elem.attrs["tabindex"] = "0"
return str(soup), resources
|
(self, nb: nbformat.notebooknode.NotebookNode, resources: Optional[Dict[str, Any]] = None, **kw: Any) -> Tuple[str, Dict[str, Any]]
|
54,321 |
nbconvert.exporters.html
|
markdown2html
|
Markdown to HTML filter respecting the anchor_link_text setting
|
@contextfilter
def markdown2html(self, context, source):
"""Markdown to HTML filter respecting the anchor_link_text setting"""
cell = context.get("cell", {})
attachments = cell.get("attachments", {})
path = context.get("resources", {}).get("metadata", {}).get("path", "")
renderer = IPythonRenderer(
escape=False,
attachments=attachments,
embed_images=self.embed_images,
path=path,
anchor_link_text=self.anchor_link_text,
exclude_anchor_links=self.exclude_anchor_links,
)
return MarkdownWithMath(renderer=renderer).render(source)
|
(self, context, source)
|
54,338 |
nbconvert.exporters.latex
|
LatexExporter
|
Exports to a Latex template. Inherit from this class if your template is
LaTeX based and you need custom transformers/filters.
If you don't need custom transformers/filters, just change the
'template_file' config option. Place your template in the special "/latex"
subfolder of the "../templates" folder.
|
class LatexExporter(TemplateExporter):
"""
Exports to a Latex template. Inherit from this class if your template is
LaTeX based and you need custom transformers/filters.
If you don't need custom transformers/filters, just change the
'template_file' config option. Place your template in the special "/latex"
subfolder of the "../templates" folder.
"""
export_from_notebook = "LaTeX"
@default("file_extension")
def _file_extension_default(self):
return ".tex"
@default("template_name")
def _template_name_default(self):
return "latex"
output_mimetype = "text/latex"
def default_filters(self):
"""Get the default filters."""
yield from super().default_filters()
yield ("resolve_references", resolve_references)
@property
def default_config(self):
c = Config(
{
"NbConvertBase": {
"display_data_priority": [
"text/latex",
"application/pdf",
"image/png",
"image/jpeg",
"image/svg+xml",
"text/markdown",
"text/plain",
]
},
"ExtractAttachmentsPreprocessor": {"enabled": True},
"ExtractOutputPreprocessor": {"enabled": True},
"SVG2PDFPreprocessor": {"enabled": True},
"LatexPreprocessor": {"enabled": True},
"SphinxPreprocessor": {"enabled": True},
"HighlightMagicsPreprocessor": {"enabled": True},
}
)
if super().default_config:
c2 = super().default_config.copy()
c2.merge(c)
c = c2
return c
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
langinfo = nb.metadata.get("language_info", {})
lexer = langinfo.get("pygments_lexer", langinfo.get("name", None))
highlight_code = self.filters.get(
"highlight_code", Highlight2Latex(pygments_lexer=lexer, parent=self)
)
self.register_filter("highlight_code", highlight_code)
# Need to make sure explicit relative paths are visible to latex for pdf conversion
# https://github.com/jupyter/nbconvert/issues/1998
nb_path = resources.get("metadata", {}).get("path") if resources else None
texinputs = os.path.abspath(nb_path) if nb_path else os.getcwd()
convert_explicitly_relative_paths = self.filters.get(
"convert_explicitly_relative_paths",
ConvertExplicitlyRelativePaths(texinputs=texinputs, parent=self),
)
self.register_filter("convert_explicitly_relative_paths", convert_explicitly_relative_paths)
return super().from_notebook_node(nb, resources, **kw)
def _create_environment(self):
environment = super()._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
return environment
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
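A sketch of driving `LatexExporter` directly; `example.ipynb` is hypothetical, and the template fragment in the comment only illustrates the alternative Jinja delimiters set in `_create_environment`:

```python
# Because of _create_environment, a custom LaTeX template fragment (hypothetical)
# uses ((* ... *)) for blocks and ((( ... ))) for variables instead of {% %}/{{ }}.
import nbformat
from nbconvert import LatexExporter

nb = nbformat.read("example.ipynb", as_version=4)
body, resources = LatexExporter().from_notebook_node(nb)

with open("example.tex", "w", encoding="utf-8") as f:
    f.write(body)
```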
54,344 |
nbconvert.exporters.latex
|
_create_environment
| null |
def _create_environment(self):
environment = super()._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
return environment
|
(self)
|
54,363 |
nbconvert.exporters.latex
|
default_filters
|
Get the default filters.
|
def default_filters(self):
"""Get the default filters."""
yield from super().default_filters()
yield ("resolve_references", resolve_references)
|
(self)
|
54,366 |
nbconvert.exporters.latex
|
from_notebook_node
|
Convert from notebook node.
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
langinfo = nb.metadata.get("language_info", {})
lexer = langinfo.get("pygments_lexer", langinfo.get("name", None))
highlight_code = self.filters.get(
"highlight_code", Highlight2Latex(pygments_lexer=lexer, parent=self)
)
self.register_filter("highlight_code", highlight_code)
# Need to make sure explicit relative paths are visible to latex for pdf conversion
# https://github.com/jupyter/nbconvert/issues/1998
nb_path = resources.get("metadata", {}).get("path") if resources else None
texinputs = os.path.abspath(nb_path) if nb_path else os.getcwd()
convert_explicitly_relative_paths = self.filters.get(
"convert_explicitly_relative_paths",
ConvertExplicitlyRelativePaths(texinputs=texinputs, parent=self),
)
self.register_filter("convert_explicitly_relative_paths", convert_explicitly_relative_paths)
return super().from_notebook_node(nb, resources, **kw)
|
(self, nb, resources=None, **kw)
|
54,387 |
nbconvert.exporters.markdown
|
MarkdownExporter
|
Exports to a markdown document (.md)
|
class MarkdownExporter(TemplateExporter):
"""
Exports to a markdown document (.md)
"""
export_from_notebook = "Markdown"
@default("file_extension")
def _file_extension_default(self):
return ".md"
@default("template_name")
def _template_name_default(self):
return "markdown"
output_mimetype = "text/markdown"
@default("raw_mimetypes")
def _raw_mimetypes_default(self):
return ["text/markdown", "text/html", ""]
@property
def default_config(self):
c = Config(
{
"ExtractAttachmentsPreprocessor": {"enabled": True},
"ExtractOutputPreprocessor": {"enabled": True},
"NbConvertBase": {
"display_data_priority": [
"text/html",
"text/markdown",
"image/svg+xml",
"text/latex",
"image/png",
"image/jpeg",
"text/plain",
]
},
"HighlightMagicsPreprocessor": {"enabled": True},
}
)
if super().default_config:
c2 = super().default_config.copy()
c2.merge(c)
c = c2
return c
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
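A sketch for `MarkdownExporter`; because `ExtractOutputPreprocessor` is enabled in its default config, extracted images land in `resources["outputs"]` (the file names below depend on the notebook and are illustrative):

```python
import nbformat
from nbconvert import MarkdownExporter

nb = nbformat.read("example.ipynb", as_version=4)   # hypothetical notebook
body, resources = MarkdownExporter().from_notebook_node(nb)

with open("example.md", "w", encoding="utf-8") as f:
    f.write(body)
for filename, data in resources.get("outputs", {}).items():
    with open(filename, "wb") as f:                 # e.g. "output_2_0.png"
        f.write(data)
```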
54,436 |
nbconvert.exporters.notebook
|
NotebookExporter
|
Exports to an IPython notebook.
This is useful when you want to use nbconvert's preprocessors to operate on
a notebook (e.g. to execute it) and then write it back to a notebook file.
|
class NotebookExporter(Exporter):
"""Exports to an IPython notebook.
This is useful when you want to use nbconvert's preprocessors to operate on
a notebook (e.g. to execute it) and then write it back to a notebook file.
"""
nbformat_version = Enum(
list(nbformat.versions),
default_value=nbformat.current_nbformat,
help="""The nbformat version to write.
Use this to downgrade notebooks.
""",
).tag(config=True)
@default("file_extension")
def _file_extension_default(self):
return ".ipynb"
output_mimetype = "application/json"
export_from_notebook = "Notebook"
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
if self.nbformat_version != nb_copy.nbformat:
resources["output_suffix"] = ".v%i" % self.nbformat_version
else:
resources["output_suffix"] = ".nbconvert"
output = nbformat.writes(nb_copy, version=self.nbformat_version)
if not output.endswith("\n"):
output = output + "\n"
return output, resources
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
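A sketch of using `NotebookExporter` to downgrade a notebook; the target version and file names are illustrative:

```python
import nbformat
from nbconvert import NotebookExporter

nb = nbformat.read("example.ipynb", as_version=4)    # hypothetical notebook
exporter = NotebookExporter(nbformat_version=3)      # downgrade target (illustrative)
output, resources = exporter.from_notebook_node(nb)
# resources["output_suffix"] is ".v3" here because the version differs from the input.
with open("example.v3.ipynb", "w", encoding="utf-8") as f:
    f.write(output)
```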
54,457 |
nbconvert.exporters.notebook
|
from_notebook_node
|
Convert from notebook node.
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
if self.nbformat_version != nb_copy.nbformat:
resources["output_suffix"] = ".v%i" % self.nbformat_version
else:
resources["output_suffix"] = ".nbconvert"
output = nbformat.writes(nb_copy, version=self.nbformat_version)
if not output.endswith("\n"):
output = output + "\n"
return output, resources
|
(self, nb, resources=None, **kw)
|
54,475 |
nbconvert.exporters.pdf
|
PDFExporter
|
Writer designed to write to PDF files.
This inherits from `LatexExporter`. It creates a LaTeX file in
a temporary directory using the template machinery, and then runs LaTeX
to create a pdf.
|
class PDFExporter(LatexExporter):
"""Writer designed to write to PDF files.
This inherits from `LatexExporter`. It creates a LaTeX file in
a temporary directory using the template machinery, and then runs LaTeX
to create a pdf.
"""
export_from_notebook = "PDF via LaTeX"
latex_count = Integer(3, help="How many times latex will be called.").tag(config=True)
latex_command = List(
["xelatex", "{filename}", "-quiet"], help="Shell command used to compile latex."
).tag(config=True)
bib_command = List(["bibtex", "{filename}"], help="Shell command used to run bibtex.").tag(
config=True
)
verbose = Bool(False, help="Whether to display the output of latex commands.").tag(config=True)
texinputs = Unicode(help="texinputs dir. A notebook's directory is added")
writer = Instance("nbconvert.writers.FilesWriter", args=(), kw={"build_directory": "."})
output_mimetype = "application/pdf"
_captured_output = List(Unicode())
@default("file_extension")
def _file_extension_default(self):
return ".pdf"
@default("template_extension")
def _template_extension_default(self):
return ".tex.j2"
def run_command(self, command_list, filename, count, log_function, raise_on_failure=None):
"""Run command_list count times.
Parameters
----------
command_list : list
A list of args to provide to Popen. Each element of this
list will be interpolated with the filename to convert.
filename : unicode
The name of the file to convert.
count : int
How many times to run the command.
raise_on_failure: Exception class (default None)
If provided, will raise the given exception instead of
returning False on command failure.
Returns
-------
success : bool
A boolean indicating if the command was successful (True)
or failed (False).
"""
command = [c.format(filename=filename) for c in command_list]
# This will throw a clearer error if the command is not found
cmd = shutil.which(command_list[0])
if cmd is None:
link = "https://nbconvert.readthedocs.io/en/latest/install.html#installing-tex"
msg = (
f"{command_list[0]} not found on PATH, if you have not installed "
f"{command_list[0]} you may need to do so. Find further instructions "
f"at {link}."
)
raise OSError(msg)
times = "time" if count == 1 else "times"
self.log.info("Running %s %i %s: %s", command_list[0], count, times, command)
shell = sys.platform == "win32"
if shell:
command = subprocess.list2cmdline(command) # type:ignore[assignment]
env = os.environ.copy()
prepend_to_env_search_path("TEXINPUTS", self.texinputs, env)
prepend_to_env_search_path("BIBINPUTS", self.texinputs, env)
prepend_to_env_search_path("BSTINPUTS", self.texinputs, env)
with open(os.devnull, "rb") as null:
stdout = subprocess.PIPE if not self.verbose else None
for _ in range(count):
p = subprocess.Popen(
command,
stdout=stdout,
stderr=subprocess.STDOUT,
stdin=null,
shell=shell, # noqa: S603
env=env,
)
out, _ = p.communicate()
if p.returncode:
if self.verbose: # noqa: SIM108
# verbose means I didn't capture stdout with PIPE,
# so it's already been displayed and `out` is None.
out_str = ""
else:
out_str = out.decode("utf-8", "replace")
log_function(command, out)
self._captured_output.append(out_str)
if raise_on_failure:
msg = f'Failed to run "{command}" command:\n{out_str}'
raise raise_on_failure(msg)
return False # failure
return True # success
def run_latex(self, filename, raise_on_failure=LatexFailed):
"""Run xelatex self.latex_count times."""
def log_error(command, out):
self.log.critical("%s failed: %s\n%s", command[0], command, out)
return self.run_command(
self.latex_command, filename, self.latex_count, log_error, raise_on_failure
)
def run_bib(self, filename, raise_on_failure=False):
"""Run bibtex one time."""
filename = os.path.splitext(filename)[0]
def log_error(command, out):
self.log.warning(
"%s had problems, most likely because there were no citations", command[0]
)
self.log.debug("%s output: %s\n%s", command[0], command, out)
return self.run_command(self.bib_command, filename, 1, log_error, raise_on_failure)
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
latex, resources = super().from_notebook_node(nb, resources=resources, **kw)
# set texinputs directory, so that local files will be found
if resources and resources.get("metadata", {}).get("path"):
self.texinputs = os.path.abspath(resources["metadata"]["path"])
else:
self.texinputs = os.getcwd()
self._captured_output = []
with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
notebook_name = "notebook"
resources["output_extension"] = ".tex"
tex_file = self.writer.write(latex, resources, notebook_name=notebook_name)
self.log.info("Building PDF")
self.run_latex(tex_file)
if self.run_bib(tex_file):
self.run_latex(tex_file)
pdf_file = notebook_name + ".pdf"
if not os.path.isfile(pdf_file):
raise LatexFailed("\n".join(self._captured_output))
self.log.info("PDF successfully created")
with open(pdf_file, "rb") as f:
pdf_data = f.read()
# convert output extension to pdf
# the writer above required it to be tex
resources["output_extension"] = ".pdf"
# clear figure outputs and attachments, extracted by latex export,
# so we don't claim to be a multi-file export.
resources.pop("outputs", None)
resources.pop("attachments", None)
return pdf_data, resources
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
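A usage sketch for `PDFExporter`; it shells out to `xelatex`/`bibtex`, so a TeX distribution must be on `PATH` (otherwise `run_command` raises the `OSError` shown above). The notebook path is hypothetical:

```python
import nbformat
from nbconvert import PDFExporter

nb = nbformat.read("example.ipynb", as_version=4)
pdf_data, resources = PDFExporter().from_notebook_node(nb)

with open("example.pdf", "wb") as f:    # from_notebook_node returns raw PDF bytes
    f.write(pdf_data)
```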
54,503 |
nbconvert.exporters.pdf
|
from_notebook_node
|
Convert from notebook node.
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
latex, resources = super().from_notebook_node(nb, resources=resources, **kw)
# set texinputs directory, so that local files will be found
if resources and resources.get("metadata", {}).get("path"):
self.texinputs = os.path.abspath(resources["metadata"]["path"])
else:
self.texinputs = os.getcwd()
self._captured_output = []
with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
notebook_name = "notebook"
resources["output_extension"] = ".tex"
tex_file = self.writer.write(latex, resources, notebook_name=notebook_name)
self.log.info("Building PDF")
self.run_latex(tex_file)
if self.run_bib(tex_file):
self.run_latex(tex_file)
pdf_file = notebook_name + ".pdf"
if not os.path.isfile(pdf_file):
raise LatexFailed("\n".join(self._captured_output))
self.log.info("PDF successfully created")
with open(pdf_file, "rb") as f:
pdf_data = f.read()
# convert output extension to pdf
# the writer above required it to be tex
resources["output_extension"] = ".pdf"
# clear figure outputs and attachments, extracted by latex export,
# so we don't claim to be a multi-file export.
resources.pop("outputs", None)
resources.pop("attachments", None)
return pdf_data, resources
|
(self, nb, resources=None, **kw)
|
54,513 |
nbconvert.exporters.pdf
|
run_bib
|
Run bibtex one time.
|
def run_bib(self, filename, raise_on_failure=False):
"""Run bibtex one time."""
filename = os.path.splitext(filename)[0]
def log_error(command, out):
self.log.warning(
"%s had problems, most likely because there were no citations", command[0]
)
self.log.debug("%s output: %s\n%s", command[0], command, out)
return self.run_command(self.bib_command, filename, 1, log_error, raise_on_failure)
|
(self, filename, raise_on_failure=False)
|
54,514 |
nbconvert.exporters.pdf
|
run_command
|
Run command_list count times.
Parameters
----------
command_list : list
A list of args to provide to Popen. Each element of this
list will be interpolated with the filename to convert.
filename : unicode
The name of the file to convert.
count : int
How many times to run the command.
raise_on_failure: Exception class (default None)
If provided, will raise the given exception instead of
returning False on command failure.
Returns
-------
success : bool
A boolean indicating if the command was successful (True)
or failed (False).
|
def run_command(self, command_list, filename, count, log_function, raise_on_failure=None):
"""Run command_list count times.
Parameters
----------
command_list : list
A list of args to provide to Popen. Each element of this
list will be interpolated with the filename to convert.
filename : unicode
The name of the file to convert.
count : int
How many times to run the command.
raise_on_failure: Exception class (default None)
If provided, will raise the given exception instead of
returning False on command failure.
Returns
-------
success : bool
A boolean indicating if the command was successful (True)
or failed (False).
"""
command = [c.format(filename=filename) for c in command_list]
# This will throw a clearer error if the command is not found
cmd = shutil.which(command_list[0])
if cmd is None:
link = "https://nbconvert.readthedocs.io/en/latest/install.html#installing-tex"
msg = (
f"{command_list[0]} not found on PATH, if you have not installed "
f"{command_list[0]} you may need to do so. Find further instructions "
f"at {link}."
)
raise OSError(msg)
times = "time" if count == 1 else "times"
self.log.info("Running %s %i %s: %s", command_list[0], count, times, command)
shell = sys.platform == "win32"
if shell:
command = subprocess.list2cmdline(command) # type:ignore[assignment]
env = os.environ.copy()
prepend_to_env_search_path("TEXINPUTS", self.texinputs, env)
prepend_to_env_search_path("BIBINPUTS", self.texinputs, env)
prepend_to_env_search_path("BSTINPUTS", self.texinputs, env)
with open(os.devnull, "rb") as null:
stdout = subprocess.PIPE if not self.verbose else None
for _ in range(count):
p = subprocess.Popen(
command,
stdout=stdout,
stderr=subprocess.STDOUT,
stdin=null,
shell=shell, # noqa: S603
env=env,
)
out, _ = p.communicate()
if p.returncode:
if self.verbose: # noqa: SIM108
# verbose means I didn't capture stdout with PIPE,
# so it's already been displayed and `out` is None.
out_str = ""
else:
out_str = out.decode("utf-8", "replace")
log_function(command, out)
self._captured_output.append(out_str)
if raise_on_failure:
msg = f'Failed to run "{command}" command:\n{out_str}'
raise raise_on_failure(msg)
return False # failure
return True # success
|
(self, command_list, filename, count, log_function, raise_on_failure=None)
|
54,515 |
nbconvert.exporters.pdf
|
run_latex
|
Run xelatex self.latex_count times.
|
def run_latex(self, filename, raise_on_failure=LatexFailed):
"""Run xelatex self.latex_count times."""
def log_error(command, out):
self.log.critical("%s failed: %s\n%s", command[0], command, out)
return self.run_command(
self.latex_command, filename, self.latex_count, log_error, raise_on_failure
)
|
(self, filename, raise_on_failure=<class 'nbconvert.exporters.pdf.LatexFailed'>)
|
54,527 |
nbconvert.exporters.python
|
PythonExporter
|
Exports a Python code file.
Note that the file produced will have a shebang of '#!/usr/bin/env python'
regardless of the actual python version used in the notebook.
|
class PythonExporter(TemplateExporter):
"""
Exports a Python code file.
Note that the file produced will have a shebang of '#!/usr/bin/env python'
regardless of the actual python version used in the notebook.
"""
@default("file_extension")
def _file_extension_default(self):
return ".py"
@default("template_name")
def _template_name_default(self):
return "python"
output_mimetype = "text/x-python"
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
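A sketch for `PythonExporter` using the inherited `from_filename` convenience method (the path is hypothetical):

```python
from nbconvert import PythonExporter

source, resources = PythonExporter().from_filename("example.ipynb")
with open("example.py", "w", encoding="utf-8") as f:
    f.write(source)   # starts with the '#!/usr/bin/env python' shebang noted above
```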
54,576 |
nbconvert.exporters.qtpdf
|
QtPDFExporter
|
Writer designed to write to PDF files.
This inherits from :class:`HTMLExporter`. It creates the HTML using the
template machinery, and then uses pyqtwebengine to create a pdf.
|
class QtPDFExporter(QtExporter):
"""Writer designed to write to PDF files.
This inherits from :class:`HTMLExporter`. It creates the HTML using the
template machinery, and then uses pyqtwebengine to create a pdf.
"""
export_from_notebook = "PDF via HTML"
format = "pdf"
paginate = Bool( # type:ignore[assignment]
True,
help="""
Split generated notebook into multiple pages.
If False, a PDF with one long page will be generated.
Set to True to match behavior of LaTeX based PDF generator
""",
).tag(config=True)
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
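A sketch for `QtPDFExporter`; it renders the HTML output with PyQtWebEngine, so the optional dependency must be installed (the error message above suggests `nbconvert[qtpdf]`). The notebook path is hypothetical:

```python
import nbformat
from nbconvert.exporters.qtpdf import QtPDFExporter

nb = nbformat.read("example.ipynb", as_version=4)
exporter = QtPDFExporter(paginate=False)   # one long page instead of paginated output
pdf_data, resources = exporter.from_notebook_node(nb)

with open("example.pdf", "wb") as f:
    f.write(pdf_data)
```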
54,582 |
nbconvert.exporters.qt_exporter
|
_check_launch_reqs
| null |
def _check_launch_reqs(self):
if sys.platform.startswith("win") and self.format == "png":
msg = "Exporting to PNG using Qt is currently not supported on Windows."
raise RuntimeError(msg)
from .qt_screenshot import QT_INSTALLED
if not QT_INSTALLED:
msg = (
f"PyQtWebEngine is not installed to support Qt {self.format.upper()} conversion. "
f"Please install `nbconvert[qt{self.format}]` to enable."
)
raise RuntimeError(msg)
from .qt_screenshot import QtScreenshot
return QtScreenshot
|
(self)
|
54,600 |
nbconvert.exporters.qt_exporter
|
_run_pyqtwebengine
| null |
def _run_pyqtwebengine(self, html):
ext = ".html"
temp_file = tempfile.NamedTemporaryFile(suffix=ext, delete=False)
filename = f"{temp_file.name[:-len(ext)]}.{self.format}"
with temp_file:
temp_file.write(html.encode("utf-8"))
try:
QtScreenshot = self._check_launch_reqs()
s = QtScreenshot()
s.capture(f"file://{temp_file.name}", filename, self.paginate)
finally:
# Ensure the file is deleted even if pyqtwebengine raises an exception
os.unlink(temp_file.name)
return s.data
|
(self, html)
|
54,606 |
nbconvert.exporters.qt_exporter
|
from_notebook_node
|
Convert from notebook node.
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
self._check_launch_reqs()
html, resources = super().from_notebook_node(nb, resources=resources, **kw)
self.log.info("Building %s", self.format.upper())
data = self._run_pyqtwebengine(html)
self.log.info("%s successfully created", self.format.upper())
# convert output extension
# the writer above required it to be html
resources["output_extension"] = f".{self.format}"
return data, resources
|
(self, nb, resources=None, **kw)
|
54,628 |
nbconvert.exporters.qtpng
|
QtPNGExporter
|
Writer designed to write to PNG files.
This inherits from :class:`HTMLExporter`. It creates the HTML using the
template machinery, and then uses pyqtwebengine to create a png.
|
class QtPNGExporter(QtExporter):
"""Writer designed to write to PNG files.
This inherits from :class:`HTMLExporter`. It creates the HTML using the
template machinery, and then uses pyqtwebengine to create a png.
"""
export_from_notebook = "PNG via HTML"
format = "png"
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
54,680 |
nbconvert.exporters.rst
|
RSTExporter
|
Exports reStructuredText documents.
|
class RSTExporter(TemplateExporter):
"""
Exports reStructuredText documents.
"""
@default("file_extension")
def _file_extension_default(self):
return ".rst"
@default("template_name")
def _template_name_default(self):
return "rst"
output_mimetype = "text/restructuredtext"
export_from_notebook = "reST"
@property
def default_config(self):
c = Config(
{
"CoalesceStreamsPreprocessor": {"enabled": True},
"ExtractOutputPreprocessor": {"enabled": True},
"HighlightMagicsPreprocessor": {"enabled": True},
}
)
if super().default_config:
c2 = super().default_config.copy()
c2.merge(c)
c = c2
return c
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
54,729 |
nbconvert.exporters.script
|
ScriptExporter
|
A script exporter.
|
class ScriptExporter(TemplateExporter):
"""A script exporter."""
# Caches of already looked-up and instantiated exporters for delegation:
_exporters = Dict()
_lang_exporters = Dict()
export_from_notebook = "Script"
@default("template_file")
def _template_file_default(self):
return "script.j2"
@default("template_name")
def _template_name_default(self):
return "script"
def _get_language_exporter(self, lang_name):
"""Find an exporter for the language name from notebook metadata.
Uses the nbconvert.exporters.script group of entry points.
Returns None if no exporter is found.
"""
if lang_name not in self._lang_exporters:
try:
exporters = entry_points(group="nbconvert.exporters.script")
exporter = [e for e in exporters if e.name == lang_name][0].load() # noqa: RUF015
except (KeyError, IndexError):
self._lang_exporters[lang_name] = None
else:
# TODO: passing config is wrong, but changing this revealed more complicated issues
self._lang_exporters[lang_name] = exporter(config=self.config, parent=self)
return self._lang_exporters[lang_name]
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
langinfo = nb.metadata.get("language_info", {})
# delegate to custom exporter, if specified
exporter_name = langinfo.get("nbconvert_exporter")
if exporter_name and exporter_name != "script":
self.log.debug("Loading script exporter: %s", exporter_name)
if exporter_name not in self._exporters:
exporter = get_exporter(exporter_name)
# TODO: passing config is wrong, but changing this revealed more complicated issues
self._exporters[exporter_name] = exporter(config=self.config, parent=self)
exporter = self._exporters[exporter_name]
return exporter.from_notebook_node(nb, resources, **kw)
# Look up a script exporter for this notebook's language
lang_name = langinfo.get("name")
if lang_name:
self.log.debug("Using script exporter for language: %s", lang_name)
exporter = self._get_language_exporter(lang_name)
if exporter is not None:
return exporter.from_notebook_node(nb, resources, **kw)
# Fall back to plain script export
self.file_extension = langinfo.get("file_extension", ".txt")
self.output_mimetype = langinfo.get("mimetype", "text/plain")
return super().from_notebook_node(nb, resources, **kw)
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
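A sketch for `ScriptExporter`; the output extension comes from the notebook's `language_info` metadata (`.py` for a Python kernel, with the `.txt`/`text/plain` fallback shown above). File names are illustrative:

```python
import nbformat
from nbconvert import ScriptExporter

nb = nbformat.read("example.ipynb", as_version=4)
body, resources = ScriptExporter().from_notebook_node(nb)

ext = resources.get("output_extension", ".txt")   # e.g. ".py" for a Python notebook
with open("example" + ext, "w", encoding="utf-8") as f:
    f.write(body)
```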
54,738 |
nbconvert.exporters.script
|
_get_language_exporter
|
Find an exporter for the language name from notebook metadata.
Uses the nbconvert.exporters.script group of entry points.
Returns None if no exporter is found.
|
def _get_language_exporter(self, lang_name):
"""Find an exporter for the language name from notebook metadata.
Uses the nbconvert.exporters.script group of entry points.
Returns None if no exporter is found.
"""
if lang_name not in self._lang_exporters:
try:
exporters = entry_points(group="nbconvert.exporters.script")
exporter = [e for e in exporters if e.name == lang_name][0].load() # noqa: RUF015
except (KeyError, IndexError):
self._lang_exporters[lang_name] = None
else:
# TODO: passing config is wrong, but changing this revealed more complicated issues
self._lang_exporters[lang_name] = exporter(config=self.config, parent=self)
return self._lang_exporters[lang_name]
|
(self, lang_name)
|
54,758 |
nbconvert.exporters.script
|
from_notebook_node
|
Convert from notebook node.
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from notebook node."""
langinfo = nb.metadata.get("language_info", {})
# delegate to custom exporter, if specified
exporter_name = langinfo.get("nbconvert_exporter")
if exporter_name and exporter_name != "script":
self.log.debug("Loading script exporter: %s", exporter_name)
if exporter_name not in self._exporters:
exporter = get_exporter(exporter_name)
# TODO: passing config is wrong, but changing this revealed more complicated issues
self._exporters[exporter_name] = exporter(config=self.config, parent=self)
exporter = self._exporters[exporter_name]
return exporter.from_notebook_node(nb, resources, **kw)
# Look up a script exporter for this notebook's language
lang_name = langinfo.get("name")
if lang_name:
self.log.debug("Using script exporter for language: %s", lang_name)
exporter = self._get_language_exporter(lang_name)
if exporter is not None:
return exporter.from_notebook_node(nb, resources, **kw)
# Fall back to plain script export
self.file_extension = langinfo.get("file_extension", ".txt")
self.output_mimetype = langinfo.get("mimetype", "text/plain")
return super().from_notebook_node(nb, resources, **kw)
|
(self, nb, resources=None, **kw)
|
54,779 |
nbconvert.exporters.slides
|
SlidesExporter
|
Exports HTML slides with reveal.js
|
class SlidesExporter(HTMLExporter):
"""Exports HTML slides with reveal.js"""
# Overrides from HTMLExporter
#################################
export_from_notebook = "Reveal.js slides"
@default("template_name")
def _template_name_default(self):
return "reveal"
@default("file_extension")
def _file_extension_default(self):
return ".slides.html"
@default("template_extension")
def _template_extension_default(self):
return ".html.j2"
# Extra resources
#################################
reveal_url_prefix = Unicode(
help="""The URL prefix for reveal.js (version 3.x).
This defaults to the reveal CDN, but can be any url pointing to a copy
of reveal.js.
For speaker notes to work, this must be a relative path to a local
copy of reveal.js: e.g., "reveal.js".
If a relative path is given, it must be a subdirectory of the
current directory (from which the server is run).
See the usage documentation
(https://nbconvert.readthedocs.io/en/latest/usage.html#reveal-js-html-slideshow)
for more details.
"""
).tag(config=True)
@default("reveal_url_prefix")
def _reveal_url_prefix_default(self):
if "RevealHelpPreprocessor.url_prefix" in self.config:
warn(
"Please update RevealHelpPreprocessor.url_prefix to "
"SlidesExporter.reveal_url_prefix in config files.",
stacklevel=2,
)
return self.config.RevealHelpPreprocessor.url_prefix
return "https://unpkg.com/[email protected]"
reveal_theme = Unicode(
"simple",
help="""
Name of the reveal.js theme to use.
We look for a file with this name under
``reveal_url_prefix``/css/theme/``reveal_theme``.css.
https://github.com/hakimel/reveal.js/tree/master/css/theme has a
list of themes that ship by default with reveal.js.
""",
).tag(config=True)
reveal_transition = Unicode(
"slide",
help="""
Name of the reveal.js transition to use.
The list of transitions that ships by default with reveal.js are:
none, fade, slide, convex, concave and zoom.
""",
).tag(config=True)
reveal_scroll = Bool(
False,
help="""
If True, enable scrolling within each slide
""",
).tag(config=True)
reveal_number = Unicode(
"",
help="""
slide number format (e.g. 'c/t'). Choose from:
'c': current, 't': total, 'h': horizontal, 'v': vertical
""",
).tag(config=True)
reveal_width = Unicode(
"",
help="""
width used to determine the aspect ratio of your presentation.
Use the horizontal pixels available on your intended presentation
equipment.
""",
).tag(config=True)
reveal_height = Unicode(
"",
help="""
height used to determine the aspect ratio of your presentation.
Use the vertical pixels available on your intended presentation
equipment.
""",
).tag(config=True)
font_awesome_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.css",
help="""
URL to load font awesome from.
Defaults to loading from cdnjs.
""",
).tag(config=True)
def _init_resources(self, resources):
resources = super()._init_resources(resources)
if "reveal" not in resources:
resources["reveal"] = {}
resources["reveal"]["url_prefix"] = self.reveal_url_prefix
resources["reveal"]["theme"] = self.reveal_theme
resources["reveal"]["transition"] = self.reveal_transition
resources["reveal"]["scroll"] = self.reveal_scroll
resources["reveal"]["number"] = self.reveal_number
resources["reveal"]["height"] = self.reveal_height
resources["reveal"]["width"] = self.reveal_width
return resources
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
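A sketch for `SlidesExporter`, configuring a few of the reveal.js traits listed above (theme/transition values and the notebook path are illustrative):

```python
import nbformat
from traitlets.config import Config
from nbconvert import SlidesExporter

c = Config()
c.SlidesExporter.reveal_theme = "serif"
c.SlidesExporter.reveal_transition = "fade"
c.SlidesExporter.reveal_scroll = True

nb = nbformat.read("example.ipynb", as_version=4)
body, resources = SlidesExporter(config=c).from_notebook_node(nb)

with open("example.slides.html", "w", encoding="utf-8") as f:
    f.write(body)
```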
54,791 |
nbconvert.exporters.slides
|
_init_resources
| null |
def _init_resources(self, resources):
resources = super()._init_resources(resources)
if "reveal" not in resources:
resources["reveal"] = {}
resources["reveal"]["url_prefix"] = self.reveal_url_prefix
resources["reveal"]["theme"] = self.reveal_theme
resources["reveal"]["transition"] = self.reveal_transition
resources["reveal"]["scroll"] = self.reveal_scroll
resources["reveal"]["number"] = self.reveal_number
resources["reveal"]["height"] = self.reveal_height
resources["reveal"]["width"] = self.reveal_width
return resources
|
(self, resources)
|
54,829 |
nbconvert.exporters.templateexporter
|
TemplateExporter
|
Exports notebooks into other file formats. Uses Jinja 2 templating engine
to output new formats. Inherit from this class if you are creating a new
template type along with new filters/preprocessors. If the filters/
preprocessors provided by default suffice, there is no need to inherit from
this class. Instead, override the template_file and file_extension
traits via a config file.
Filters available by default for templates:
- add_anchor
- add_prompts
- ansi2html
- ansi2latex
- ascii_only
- citation2latex
- clean_html
- comment_lines
- convert_pandoc
- escape_html
- escape_html_keep_quotes
- escape_html_script
- escape_latex
- filter_data_type
- get_lines
- get_metadata
- highlight2html
- highlight2latex
- html2text
- indent
- ipython2python
- json_dumps
- markdown2asciidoc
- markdown2html
- markdown2latex
- markdown2rst
- path2url
- posix_path
- prevent_list_blocks
- strip_ansi
- strip_dollars
- strip_files_prefix
- strip_trailing_newline
- text_base64
- wrap_text
|
class TemplateExporter(Exporter):
"""
Exports notebooks into other file formats. Uses Jinja 2 templating engine
to output new formats. Inherit from this class if you are creating a new
template type along with new filters/preprocessors. If the filters/
preprocessors provided by default suffice, there is no need to inherit from
this class. Instead, override the template_file and file_extension
traits via a config file.
Filters available by default for templates:
{filters}
"""
# finish the docstring
__doc__ = __doc__.format(filters="- " + "\n - ".join(sorted(default_filters.keys())))
_template_cached = None
def _invalidate_template_cache(self, change=None):
self._template_cached = None
@property
def template(self):
if self._template_cached is None:
self._template_cached = self._load_template()
return self._template_cached
_environment_cached = None
def _invalidate_environment_cache(self, change=None):
self._environment_cached = None
self._invalidate_template_cache()
@property
def environment(self):
if self._environment_cached is None:
self._environment_cached = self._create_environment()
return self._environment_cached
@property
def default_config(self):
c = Config(
{
"RegexRemovePreprocessor": {"enabled": True},
"TagRemovePreprocessor": {"enabled": True},
}
)
if super().default_config:
c2 = super().default_config.copy()
c2.merge(c)
c = c2
return c
template_name = Unicode(help="Name of the template to use").tag(
config=True, affects_template=True
)
template_file = Unicode(None, allow_none=True, help="Name of the template file to use").tag(
config=True, affects_template=True
)
raw_template = Unicode("", help="raw template string").tag(affects_environment=True)
enable_async = Bool(False, help="Enable Jinja async template execution").tag(
affects_environment=True
)
_last_template_file = ""
_raw_template_key = "<memory>"
@validate("template_name")
def _template_name_validate(self, change):
template_name = change["value"]
if template_name and template_name.endswith(".tpl"):
warnings.warn(
f"5.x style template name passed '{self.template_name}'. Use --template-name for the template directory with a index.<ext>.j2 file and/or --template-file to denote a different template.",
DeprecationWarning,
stacklevel=2,
)
directory, self.template_file = os.path.split(self.template_name)
if directory:
directory, template_name = os.path.split(directory)
if directory and os.path.isabs(directory):
self.extra_template_basedirs = [directory]
return template_name
@observe("template_file")
def _template_file_changed(self, change):
new = change["new"]
if new == "default":
self.template_file = self.default_template # type:ignore[attr-defined]
return
# check if template_file is a file path
# rather than a name already on template_path
full_path = os.path.abspath(new)
if os.path.isfile(full_path):
directory, self.template_file = os.path.split(full_path)
self.extra_template_paths = [directory, *self.extra_template_paths]
# While not strictly an invalid template file name, the extension hints that there isn't a template directory involved
if self.template_file and self.template_file.endswith(".tpl"):
warnings.warn(
f"5.x style template file passed '{new}'. Use --template-name for the template directory with a index.<ext>.j2 file and/or --template-file to denote a different template.",
DeprecationWarning,
stacklevel=2,
)
@default("template_file")
def _template_file_default(self):
if self.template_extension:
return "index" + self.template_extension
return None
@observe("raw_template")
def _raw_template_changed(self, change):
if not change["new"]:
self.template_file = self._last_template_file
self._invalidate_template_cache()
template_paths = List(["."]).tag(config=True, affects_environment=True)
extra_template_basedirs = List(Unicode()).tag(config=True, affects_environment=True)
extra_template_paths = List(Unicode()).tag(config=True, affects_environment=True)
@default("extra_template_basedirs")
def _default_extra_template_basedirs(self):
return [os.getcwd()]
# Extension that the template files use.
template_extension = Unicode().tag(config=True, affects_environment=True)
template_data_paths = List(
jupyter_path("nbconvert", "templates"), help="Path where templates can be installed too."
).tag(affects_environment=True)
@default("template_extension")
def _template_extension_default(self):
if self.file_extension:
return self.file_extension + ".j2"
return self.file_extension
exclude_input = Bool(
False, help="This allows you to exclude code cell inputs from all templates if set to True."
).tag(config=True)
exclude_input_prompt = Bool(
False, help="This allows you to exclude input prompts from all templates if set to True."
).tag(config=True)
exclude_output = Bool(
False,
help="This allows you to exclude code cell outputs from all templates if set to True.",
).tag(config=True)
exclude_output_prompt = Bool(
False, help="This allows you to exclude output prompts from all templates if set to True."
).tag(config=True)
exclude_output_stdin = Bool(
True,
help="This allows you to exclude output of stdin stream from lab template if set to True.",
).tag(config=True)
exclude_code_cell = Bool(
False, help="This allows you to exclude code cells from all templates if set to True."
).tag(config=True)
exclude_markdown = Bool(
False, help="This allows you to exclude markdown cells from all templates if set to True."
).tag(config=True)
exclude_raw = Bool(
False, help="This allows you to exclude raw cells from all templates if set to True."
).tag(config=True)
exclude_unknown = Bool(
False, help="This allows you to exclude unknown cells from all templates if set to True."
).tag(config=True)
extra_loaders: List[t.Any] = List(
help="Jinja loaders to find templates. Will be tried in order "
"before the default FileSystem ones.",
).tag(affects_environment=True)
filters = Dict(
help="""Dictionary of filters, by name and namespace, to add to the Jinja
environment."""
).tag(config=True, affects_environment=True)
raw_mimetypes = List(
Unicode(), help="""formats of raw cells to be included in this Exporter's output."""
).tag(config=True)
@default("raw_mimetypes")
def _raw_mimetypes_default(self):
return [self.output_mimetype, ""]
# TODO: passing config is wrong, but changing this revealed more complicated issues
def __init__(self, config=None, **kw):
"""
Public constructor
Parameters
----------
config : config
User configuration instance.
extra_loaders : list[of Jinja Loaders]
ordered list of Jinja loader to find templates. Will be tried in order
before the default FileSystem ones.
template_file : str (optional, kw arg)
Template to use when exporting.
"""
super().__init__(config=config, **kw)
self.observe(
self._invalidate_environment_cache, list(self.traits(affects_environment=True))
)
self.observe(self._invalidate_template_cache, list(self.traits(affects_template=True)))
def _load_template(self):
"""Load the Jinja template object from the template file
This is triggered by various trait changes that would change the template.
"""
# this gives precedence to a raw_template if present
with self.hold_trait_notifications():
if self.template_file and (self.template_file != self._raw_template_key):
self._last_template_file = self.template_file
if self.raw_template:
self.template_file = self._raw_template_key
if not self.template_file:
msg = "No template_file specified!"
raise ValueError(msg)
# First try to load the
# template by name with extension added, then try loading the template
# as if the name is explicitly specified.
template_file = self.template_file
self.log.debug("Attempting to load template %s", template_file)
self.log.debug(" template_paths: %s", os.pathsep.join(self.template_paths))
return self.environment.get_template(template_file)
def from_filename( # type:ignore[override]
self, filename: str, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[str, dict[str, t.Any]]:
"""Convert a notebook from a filename."""
return super().from_filename(filename, resources, **kw) # type:ignore[return-value]
def from_file( # type:ignore[override]
self, file_stream: t.Any, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[str, dict[str, t.Any]]:
"""Convert a notebook from a file."""
return super().from_file(file_stream, resources, **kw) # type:ignore[return-value]
def from_notebook_node( # type:ignore[explicit-override, override]
self, nb: NotebookNode, resources: dict[str, t.Any] | None = None, **kw: t.Any
) -> tuple[str, dict[str, t.Any]]:
"""
Convert a notebook from a notebook node instance.
Parameters
----------
nb : :class:`~nbformat.NotebookNode`
Notebook node
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
"""
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
resources.setdefault("raw_mimetypes", self.raw_mimetypes)
resources["global_content_filter"] = {
"include_code": not self.exclude_code_cell,
"include_markdown": not self.exclude_markdown,
"include_raw": not self.exclude_raw,
"include_unknown": not self.exclude_unknown,
"include_input": not self.exclude_input,
"include_output": not self.exclude_output,
"include_output_stdin": not self.exclude_output_stdin,
"include_input_prompt": not self.exclude_input_prompt,
"include_output_prompt": not self.exclude_output_prompt,
"no_prompt": self.exclude_input_prompt and self.exclude_output_prompt,
}
# Top level variables are passed to the template_exporter here.
output = self.template.render(nb=nb_copy, resources=resources)
output = output.lstrip("\r\n")
return output, resources
def _register_filter(self, environ, name, jinja_filter):
"""
Register a filter.
A filter is a function that accepts and acts on one string.
The filters are accessible within the Jinja templating engine.
Parameters
----------
name : str
name to give the filter in the Jinja engine
filter : filter
"""
if jinja_filter is None:
msg = "filter"
raise TypeError(msg)
isclass = isinstance(jinja_filter, type)
constructed = not isclass
        # Handle the filter's registration based on its type
if constructed and isinstance(jinja_filter, (str,)):
# filter is a string, import the namespace and recursively call
# this register_filter method
filter_cls = import_item(jinja_filter)
return self._register_filter(environ, name, filter_cls)
if constructed and callable(jinja_filter):
# filter is a function, no need to construct it.
environ.filters[name] = jinja_filter
return jinja_filter
if isclass and issubclass(jinja_filter, HasTraits):
# filter is configurable. Make sure to pass in new default for
# the enabled flag if one was specified.
filter_instance = jinja_filter(parent=self)
self._register_filter(environ, name, filter_instance)
return None
if isclass:
# filter is not configurable, construct it
filter_instance = jinja_filter()
self._register_filter(environ, name, filter_instance)
return None
# filter is an instance of something without a __call__
# attribute.
msg = "filter"
raise TypeError(msg)
def register_filter(self, name, jinja_filter):
"""
Register a filter.
A filter is a function that accepts and acts on one string.
The filters are accessible within the Jinja templating engine.
Parameters
----------
name : str
name to give the filter in the Jinja engine
filter : filter
"""
return self._register_filter(self.environment, name, jinja_filter)
def default_filters(self):
"""Override in subclasses to provide extra filters.
This should return an iterable of 2-tuples: (name, class-or-function).
You should call the method on the parent class and include the filters
it provides.
If a name is repeated, the last filter provided wins. Filters from
user-supplied config win over filters provided by classes.
"""
return default_filters.items()
def _create_environment(self):
"""
Create the Jinja templating environment.
"""
paths = self.template_paths
self.log.debug("Template paths:\n\t%s", "\n\t".join(paths))
loaders = [
*self.extra_loaders,
ExtensionTolerantLoader(FileSystemLoader(paths), self.template_extension),
DictLoader({self._raw_template_key: self.raw_template}),
]
environment = Environment( # noqa: S701
loader=ChoiceLoader(loaders),
extensions=JINJA_EXTENSIONS,
enable_async=self.enable_async,
)
environment.globals["uuid4"] = uuid.uuid4
# Add default filters to the Jinja2 environment
for key, value in self.default_filters():
self._register_filter(environment, key, value)
# Load user filters. Overwrite existing filters if need be.
if self.filters:
for key, user_filter in self.filters.items():
self._register_filter(environment, key, user_filter)
return environment
def _init_preprocessors(self):
super()._init_preprocessors()
conf = self._get_conf()
preprocessors = conf.get("preprocessors", {})
# preprocessors is a dict for three reasons
# * We rely on recursive_update, which can only merge dicts, lists will be overwritten
# * We can use the key with numerical prefixing to guarantee ordering (/etc/*.d/XY-file style)
# * We can disable preprocessors by overwriting the value with None
for _, preprocessor in sorted(preprocessors.items(), key=lambda x: x[0]):
if preprocessor is not None:
kwargs = preprocessor.copy()
preprocessor_cls = kwargs.pop("type")
preprocessor_cls = import_item(preprocessor_cls)
if preprocessor_cls.__name__ in self.config:
kwargs.update(self.config[preprocessor_cls.__name__])
preprocessor = preprocessor_cls(**kwargs) # noqa: PLW2901
self.register_preprocessor(preprocessor)
def _get_conf(self):
conf: dict[str, t.Any] = {} # the configuration once all conf files are merged
for path in map(Path, self.template_paths):
conf_path = path / "conf.json"
if conf_path.exists():
with conf_path.open() as f:
conf = recursive_update(conf, json.load(f))
return conf
@default("template_paths")
def _template_paths(self, prune=True, root_dirs=None):
paths = []
root_dirs = self.get_prefix_root_dirs()
template_names = self.get_template_names()
for template_name in template_names:
for base_dir in self.extra_template_basedirs:
path = os.path.join(base_dir, template_name)
if not prune or os.path.exists(path):
paths.append(path)
for root_dir in root_dirs:
base_dir = os.path.join(root_dir, "nbconvert", "templates")
path = os.path.join(base_dir, template_name)
if not prune or os.path.exists(path):
paths.append(path)
for root_dir in root_dirs:
# we include root_dir for when we want to be very explicit, e.g.
# {% extends 'nbconvert/templates/classic/base.html' %}
paths.append(root_dir)
# we include base_dir for when we want to be explicit, but less than root_dir, e.g.
# {% extends 'classic/base.html' %}
base_dir = os.path.join(root_dir, "nbconvert", "templates")
paths.append(base_dir)
compatibility_dir = os.path.join(root_dir, "nbconvert", "templates", "compatibility")
paths.append(compatibility_dir)
additional_paths = []
for path in self.template_data_paths:
if not prune or os.path.exists(path):
additional_paths.append(path)
return paths + self.extra_template_paths + additional_paths
@classmethod
def get_compatibility_base_template_conf(cls, name):
"""Get the base template config."""
# Hard-coded base template confs to use for backwards compatibility for 5.x-only templates
if name == "display_priority":
return {"base_template": "base"}
if name == "full":
return {"base_template": "classic", "mimetypes": {"text/html": True}}
return None
def get_template_names(self):
"""Finds a list of template names where each successive template name is the base template"""
template_names = []
root_dirs = self.get_prefix_root_dirs()
base_template: str | None = self.template_name
merged_conf: dict[str, t.Any] = {} # the configuration once all conf files are merged
while base_template is not None:
template_names.append(base_template)
conf: dict[str, t.Any] = {}
found_at_least_one = False
for base_dir in self.extra_template_basedirs:
template_dir = os.path.join(base_dir, base_template)
if os.path.exists(template_dir):
found_at_least_one = True
conf_file = os.path.join(template_dir, "conf.json")
if os.path.exists(conf_file):
with open(conf_file) as f:
conf = recursive_update(json.load(f), conf)
for root_dir in root_dirs:
template_dir = os.path.join(root_dir, "nbconvert", "templates", base_template)
if os.path.exists(template_dir):
found_at_least_one = True
conf_file = os.path.join(template_dir, "conf.json")
if os.path.exists(conf_file):
with open(conf_file) as f:
conf = recursive_update(json.load(f), conf)
if not found_at_least_one:
# Check for backwards compatibility template names
for root_dir in root_dirs:
compatibility_file = base_template + ".tpl"
compatibility_path = os.path.join(
root_dir, "nbconvert", "templates", "compatibility", compatibility_file
)
if os.path.exists(compatibility_path):
found_at_least_one = True
warnings.warn(
f"5.x template name passed '{self.template_name}'. Use 'lab' or 'classic' for new template usage.",
DeprecationWarning,
stacklevel=2,
)
self.template_file = compatibility_file
conf = self.get_compatibility_base_template_conf(base_template)
self.template_name = t.cast(str, conf.get("base_template"))
break
if not found_at_least_one:
paths = "\n\t".join(root_dirs)
msg = f"No template sub-directory with name {base_template!r} found in the following paths:\n\t{paths}"
raise ValueError(msg)
merged_conf = recursive_update(dict(conf), merged_conf)
base_template = t.cast(t.Any, conf.get("base_template"))
conf = merged_conf
mimetypes = [mimetype for mimetype, enabled in conf.get("mimetypes", {}).items() if enabled]
if self.output_mimetype and self.output_mimetype not in mimetypes and mimetypes:
supported_mimetypes = "\n\t".join(mimetypes)
msg = f"Unsupported mimetype {self.output_mimetype!r} for template {self.template_name!r}, mimetypes supported are: \n\t{supported_mimetypes}"
raise ValueError(msg)
return template_names
def get_prefix_root_dirs(self):
"""Get the prefix root dirs."""
# We look at the usual jupyter locations, and for development purposes also
# relative to the package directory (first entry, meaning with highest precedence)
root_dirs = []
if DEV_MODE:
root_dirs.append(os.path.abspath(os.path.join(ROOT, "..", "..", "share", "jupyter")))
root_dirs.extend(jupyter_path())
return root_dirs
def _init_resources(self, resources):
resources = super()._init_resources(resources)
resources["deprecated"] = deprecated
return resources
|
(config=None, **kw)
|
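A minimal usage sketch for this exporter, assuming nbconvert and nbformat are installed; "example.ipynb" and "example.html" are hypothetical placeholder paths.

import nbformat
from nbconvert import HTMLExporter

nb = nbformat.read("example.ipynb", as_version=4)  # hypothetical input notebook
exporter = HTMLExporter(template_name="lab")       # "lab" and "classic" are the built-in HTML templates
body, resources = exporter.from_notebook_node(nb)  # returns (html_string, resources_dict)
with open("example.html", "w", encoding="utf-8") as f:
    f.write(body)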
54,878 |
nbconvert.exporters.webpdf
|
WebPDFExporter
|
Writer designed to write to PDF files.
This inherits from :class:`HTMLExporter`. It creates the HTML using the
template machinery, and then runs Playwright to create a PDF.
|
class WebPDFExporter(HTMLExporter):
"""Writer designed to write to PDF files.
This inherits from :class:`HTMLExporter`. It creates the HTML using the
    template machinery, and then runs Playwright to create a PDF.
"""
export_from_notebook = "PDF via HTML"
allow_chromium_download = Bool(
False,
help="Whether to allow downloading Chromium if no suitable version is found on the system.",
).tag(config=True)
paginate = Bool(
True,
help="""
Split generated notebook into multiple pages.
If False, a PDF with one long page will be generated.
Set to True to match behavior of LaTeX based PDF generator
""",
).tag(config=True)
@default("file_extension")
def _file_extension_default(self):
return ".html"
@default("template_name")
def _template_name_default(self):
return "webpdf"
disable_sandbox = Bool(
False,
help="""
Disable chromium security sandbox when converting to PDF.
WARNING: This could cause arbitrary code execution in specific circumstances,
where JS in your notebook can execute serverside code! Please use with
caution.
``https://github.com/puppeteer/puppeteer/blob/main@%7B2020-12-14T17:22:24Z%7D/docs/troubleshooting.md#setting-up-chrome-linux-sandbox``
has more information.
This is required for webpdf to work inside most container environments.
""",
).tag(config=True)
def run_playwright(self, html):
"""Run playwright."""
async def main(temp_file):
"""Run main playwright script."""
args = ["--no-sandbox"] if self.disable_sandbox else []
try:
from playwright.async_api import async_playwright # type: ignore[import-not-found]
except ModuleNotFoundError as e:
msg = (
"Playwright is not installed to support Web PDF conversion. "
"Please install `nbconvert[webpdf]` to enable."
)
raise RuntimeError(msg) from e
if self.allow_chromium_download:
cmd = [sys.executable, "-m", "playwright", "install", "chromium"]
subprocess.check_call(cmd) # noqa: S603
playwright = await async_playwright().start()
chromium = playwright.chromium
try:
browser = await chromium.launch(
handle_sigint=False, handle_sigterm=False, handle_sighup=False, args=args
)
except Exception as e:
msg = (
"No suitable chromium executable found on the system. "
"Please use '--allow-chromium-download' to allow downloading one,"
"or install it using `playwright install chromium`."
)
await playwright.stop()
raise RuntimeError(msg) from e
page = await browser.new_page()
await page.emulate_media(media="print")
await page.wait_for_timeout(100)
await page.goto(f"file://{temp_file.name}", wait_until="networkidle")
await page.wait_for_timeout(100)
pdf_params = {"print_background": True}
if not self.paginate:
                # Floating point precision errors can cause the printed
                # PDF to spill over onto a new page by a pixel fraction.
dimensions = await page.evaluate(
"""() => {
const rect = document.body.getBoundingClientRect();
return {
width: Math.ceil(rect.width) + 1,
height: Math.ceil(rect.height) + 1,
}
}"""
)
width = dimensions["width"]
height = dimensions["height"]
# 200 inches is the maximum size for Adobe Acrobat Reader.
pdf_params.update(
{
"width": min(width, 200 * 72),
"height": min(height, 200 * 72),
}
)
pdf_data = await page.pdf(**pdf_params)
await browser.close()
await playwright.stop()
return pdf_data
pool = concurrent.futures.ThreadPoolExecutor()
# Create a temporary file to pass the HTML code to Chromium:
# Unfortunately, tempfile on Windows does not allow for an already open
# file to be opened by a separate process. So we must close it first
# before calling Chromium. We also specify delete=False to ensure the
# file is not deleted after closing (the default behavior).
temp_file = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
with temp_file:
temp_file.write(html.encode("utf-8"))
try:
# TODO: when dropping Python 3.6, use
# pdf_data = pool.submit(asyncio.run, main(temp_file)).result()
def run_coroutine(coro):
"""Run an internal coroutine."""
loop = (
asyncio.ProactorEventLoop() # type:ignore[attr-defined]
if IS_WINDOWS
else asyncio.new_event_loop()
)
asyncio.set_event_loop(loop)
return loop.run_until_complete(coro)
pdf_data = pool.submit(run_coroutine, main(temp_file)).result()
finally:
# Ensure the file is deleted even if playwright raises an exception
os.unlink(temp_file.name)
return pdf_data
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from a notebook node."""
html, resources = super().from_notebook_node(nb, resources=resources, **kw)
self.log.info("Building PDF")
pdf_data = self.run_playwright(html)
self.log.info("PDF successfully created")
# convert output extension to pdf
# the writer above required it to be html
resources["output_extension"] = ".pdf"
return pdf_data, resources
|
(*args: 't.Any', **kwargs: 't.Any') -> 't.Any'
|
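A minimal sketch of converting a notebook to PDF with this exporter, assuming the optional dependency set is installed (pip install nbconvert[webpdf] plus a Chromium build); the notebook path is a hypothetical placeholder.

import nbformat
from nbconvert import WebPDFExporter

nb = nbformat.read("example.ipynb", as_version=4)        # hypothetical input notebook
exporter = WebPDFExporter(allow_chromium_download=True)  # let Playwright fetch Chromium if none is found
pdf_bytes, resources = exporter.from_notebook_node(nb)   # bytes of the rendered PDF
with open("example.pdf", "wb") as f:
    f.write(pdf_bytes)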
54,906 |
nbconvert.exporters.webpdf
|
from_notebook_node
|
Convert from a notebook node.
|
def from_notebook_node(self, nb, resources=None, **kw):
"""Convert from a notebook node."""
html, resources = super().from_notebook_node(nb, resources=resources, **kw)
self.log.info("Building PDF")
pdf_data = self.run_playwright(html)
self.log.info("PDF successfully created")
# convert output extension to pdf
# the writer above required it to be html
resources["output_extension"] = ".pdf"
return pdf_data, resources
|
(self, nb, resources=None, **kw)
|
54,917 |
nbconvert.exporters.webpdf
|
run_playwright
|
Run playwright.
|
def run_playwright(self, html):
"""Run playwright."""
async def main(temp_file):
"""Run main playwright script."""
args = ["--no-sandbox"] if self.disable_sandbox else []
try:
from playwright.async_api import async_playwright # type: ignore[import-not-found]
except ModuleNotFoundError as e:
msg = (
"Playwright is not installed to support Web PDF conversion. "
"Please install `nbconvert[webpdf]` to enable."
)
raise RuntimeError(msg) from e
if self.allow_chromium_download:
cmd = [sys.executable, "-m", "playwright", "install", "chromium"]
subprocess.check_call(cmd) # noqa: S603
playwright = await async_playwright().start()
chromium = playwright.chromium
try:
browser = await chromium.launch(
handle_sigint=False, handle_sigterm=False, handle_sighup=False, args=args
)
except Exception as e:
msg = (
"No suitable chromium executable found on the system. "
"Please use '--allow-chromium-download' to allow downloading one,"
"or install it using `playwright install chromium`."
)
await playwright.stop()
raise RuntimeError(msg) from e
page = await browser.new_page()
await page.emulate_media(media="print")
await page.wait_for_timeout(100)
await page.goto(f"file://{temp_file.name}", wait_until="networkidle")
await page.wait_for_timeout(100)
pdf_params = {"print_background": True}
if not self.paginate:
                # Floating point precision errors can cause the printed
                # PDF to spill over onto a new page by a pixel fraction.
dimensions = await page.evaluate(
"""() => {
const rect = document.body.getBoundingClientRect();
return {
width: Math.ceil(rect.width) + 1,
height: Math.ceil(rect.height) + 1,
}
}"""
)
width = dimensions["width"]
height = dimensions["height"]
# 200 inches is the maximum size for Adobe Acrobat Reader.
pdf_params.update(
{
"width": min(width, 200 * 72),
"height": min(height, 200 * 72),
}
)
pdf_data = await page.pdf(**pdf_params)
await browser.close()
await playwright.stop()
return pdf_data
pool = concurrent.futures.ThreadPoolExecutor()
# Create a temporary file to pass the HTML code to Chromium:
# Unfortunately, tempfile on Windows does not allow for an already open
# file to be opened by a separate process. So we must close it first
# before calling Chromium. We also specify delete=False to ensure the
# file is not deleted after closing (the default behavior).
temp_file = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
with temp_file:
temp_file.write(html.encode("utf-8"))
try:
# TODO: when dropping Python 3.6, use
# pdf_data = pool.submit(asyncio.run, main(temp_file)).result()
def run_coroutine(coro):
"""Run an internal coroutine."""
loop = (
asyncio.ProactorEventLoop() # type:ignore[attr-defined]
if IS_WINDOWS
else asyncio.new_event_loop()
)
asyncio.set_event_loop(loop)
return loop.run_until_complete(coro)
pdf_data = pool.submit(run_coroutine, main(temp_file)).result()
finally:
# Ensure the file is deleted even if playwright raises an exception
os.unlink(temp_file.name)
return pdf_data
|
(self, html)
|
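The method above drives Playwright's async API from a worker thread; as a rough standalone sketch of the same headless-Chromium print technique (an illustration, not the exporter's implementation), the synchronous API can be used:

from playwright.sync_api import sync_playwright  # requires `pip install playwright` and `playwright install chromium`

def html_file_to_pdf(path):
    """Print a local HTML file to PDF bytes with headless Chromium."""
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page()
        page.emulate_media(media="print")
        page.goto(f"file://{path}", wait_until="networkidle")
        pdf_bytes = page.pdf(print_background=True)
        browser.close()
    return pdf_bytes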
54,930 |
nbconvert.exporters.base
|
export
|
Export a notebook object using a specific exporter class.
Parameters
----------
exporter : ``Exporter`` class or instance
Class or instance of the exporter that should be used. If the
method initializes its own instance of the class, it is ASSUMED that
the class type provided exposes a constructor (``__init__``) with the same
signature as the base Exporter class.
nb : :class:`~nbformat.NotebookNode`
The notebook to export.
config : config (optional, keyword arg)
User configuration instance.
resources : dict (optional, keyword arg)
Resources used in the conversion process.
Returns
-------
tuple
output : str
The resulting converted notebook.
resources : dictionary
Dictionary of resources used prior to and during the conversion
process.
|
def export(exporter, nb, **kw):
"""
    Export a notebook object using a specific exporter class.
Parameters
----------
exporter : ``Exporter`` class or instance
Class or instance of the exporter that should be used. If the
method initializes its own instance of the class, it is ASSUMED that
the class type provided exposes a constructor (``__init__``) with the same
signature as the base Exporter class.
nb : :class:`~nbformat.NotebookNode`
The notebook to export.
config : config (optional, keyword arg)
User configuration instance.
resources : dict (optional, keyword arg)
Resources used in the conversion process.
Returns
-------
tuple
output : str
The resulting converted notebook.
resources : dictionary
Dictionary of resources used prior to and during the conversion
process.
"""
# Check arguments
if exporter is None:
msg = "Exporter is None"
raise TypeError(msg)
if not isinstance(exporter, Exporter) and not issubclass(exporter, Exporter):
msg = "exporter does not inherit from Exporter (base)"
raise TypeError(msg)
if nb is None:
msg = "nb is None"
raise TypeError(msg)
# Create the exporter
resources = kw.pop("resources", None)
exporter_instance = exporter if isinstance(exporter, Exporter) else exporter(**kw)
# Try to convert the notebook using the appropriate conversion function.
if isinstance(nb, NotebookNode):
output, resources = exporter_instance.from_notebook_node(nb, resources)
elif isinstance(nb, (str,)):
output, resources = exporter_instance.from_filename(nb, resources)
else:
output, resources = exporter_instance.from_file(nb, resources)
return output, resources
|
(exporter, nb, **kw)
|
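A short sketch of calling export() with an exporter class; the notebook path is a hypothetical placeholder.

import nbformat
from nbconvert import MarkdownExporter
from nbconvert.exporters import export

nb = nbformat.read("example.ipynb", as_version=4)  # hypothetical input notebook
# Pass the class (export() constructs it) or an already configured instance.
output, resources = export(MarkdownExporter, nb)
print(resources.get("output_extension"))           # typically ".md" for the Markdown exporter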
54,933 |
nbconvert.exporters.base
|
get_export_names
|
Return a list of the currently supported export targets
Exporters can be found in external packages by registering
them as an nbconvert.exporters entrypoint.
|
def get_export_names(config=get_config()): # noqa: B008
"""Return a list of the currently supported export targets
Exporters can be found in external packages by registering
    them as an nbconvert.exporters entrypoint.
"""
exporters = sorted(e.name for e in entry_points(group="nbconvert.exporters"))
if os.environ.get("NBCONVERT_DISABLE_CONFIG_EXPORTERS"):
get_logger().info(
"Config exporter loading disabled, no additional exporters will be automatically included."
)
return exporters
enabled_exporters = []
for exporter_name in exporters:
try:
e = get_exporter(exporter_name)(config=config)
if e.enabled:
enabled_exporters.append(exporter_name)
except (ExporterDisabledError, ValueError):
pass
return enabled_exporters
|
(config={})
|
54,934 |
nbconvert.exporters.base
|
get_exporter
|
Given an exporter name or import path, return a class ready to be instantiated
Raises ExporterNameError if the exporter is not found, or ExporterDisabledError if it is not enabled
|
def get_exporter(name, config=get_config()): # noqa: B008
"""Given an exporter name or import path, return a class ready to be instantiated
    Raises ExporterNameError if the exporter is not found, or ExporterDisabledError if it is not enabled
"""
if name == "ipynb":
name = "notebook"
try:
exporters = entry_points(group="nbconvert.exporters")
items = [e for e in exporters if e.name == name or e.name == name.lower()]
exporter = items[0].load()
if getattr(exporter(config=config), "enabled", True):
return exporter
raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
except IndexError:
pass
if "." in name:
try:
exporter = import_item(name)
if getattr(exporter(config=config), "enabled", True):
return exporter
raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
except ImportError:
log = get_logger()
log.error("Error importing %s", name, exc_info=True) # noqa: G201
msg = 'Unknown exporter "{}", did you mean one of: {}?'.format(
name, ", ".join(get_export_names())
)
raise ExporterNameError(msg)
|
(name, config={})
|
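A brief sketch of discovering exporters and resolving one by name:

from nbconvert.exporters import get_export_names, get_exporter

print(get_export_names())            # e.g. ['asciidoc', 'html', 'latex', 'markdown', ...]
exporter_cls = get_exporter("html")  # resolves the entry point and returns the class
exporter = exporter_cls()            # instantiate, optionally passing config=...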
54,939 |
ml_wrappers.dataset.dataset_wrapper
|
DatasetWrapper
|
A wrapper around a dataset to make dataset operations more uniform across explainers.
|
class DatasetWrapper(object):
"""A wrapper around a dataset to make dataset operations more uniform across explainers."""
def __init__(self, dataset, clear_references=False):
"""Initialize the dataset wrapper.
:param dataset: A matrix of feature vector examples (# examples x # features) for
initializing the explainer.
        :type dataset: numpy.ndarray or pandas.DataFrame or pandas.Series or scipy.sparse.csr_matrix
or shap.DenseData or torch.Tensor or tensorflow.python.data.ops.dataset_ops.BatchDataset
:param clear_references: A memory optimization that clears all references after use in explainers.
:type clear_references: bool
"""
if not isinstance(dataset, pd.DataFrame) and not isinstance(dataset, pd.Series) and \
not isinstance(dataset, np.ndarray) and not issparse(dataset) and \
not str(type(dataset)).endswith(".DenseData'>") and \
not str(type(dataset)).endswith("torch.Tensor'>") and \
not str(type(dataset)).endswith("BatchDataset'>"):
raise TypeError("Got type {0} which is not supported in DatasetWrapper".format(
type(dataset))
)
self._features = None
self._original_dataset_with_type = dataset
self._dataset_is_df = isinstance(dataset, pd.DataFrame)
self._dataset_is_series = isinstance(dataset, pd.Series)
self._dataset_is_batch = str(type(dataset)).endswith("BatchDataset'>")
self._default_index_cols = ['index']
self._default_index = True
if self._dataset_is_df:
self._features = dataset.columns.values.tolist()
if self._dataset_is_df or self._dataset_is_series:
dataset = dataset.values
elif self._dataset_is_batch:
dataset, features, size = _convert_batch_dataset_to_numpy(dataset)
self._features = features
self._batch_size = size
self._dataset = dataset
self._original_dataset = dataset
self._summary_dataset = None
self._column_indexer = None
self._subset_taken = False
self._summary_computed = False
self._string_indexed = False
self._one_hot_encoded = False
self._one_hot_encoder = None
self._timestamp_featurized = False
self._timestamp_featurizer = None
self._clear_references = clear_references
@property
def dataset(self):
"""Get the dataset.
:return: The underlying dataset.
:rtype: numpy.ndarray or scipy.sparse.csr_matrix
"""
return self._dataset
@property
def typed_dataset(self):
"""Get the dataset in the original type, pandas DataFrame or Series.
:return: The underlying dataset.
:rtype: numpy.ndarray or pandas.DataFrame or pandas.Series or scipy.sparse matrix
"""
wrapper_func = self.typed_wrapper_func
return wrapper_func(self._dataset)
def typed_wrapper_func(self, dataset, keep_index_as_feature=False):
"""Get a wrapper function to convert the dataset to the original type, pandas DataFrame or Series.
:param dataset: The dataset to convert to original type.
:type dataset: numpy.ndarray or scipy.sparse.csr_matrix
:param keep_index_as_feature: Whether to keep the index as a feature when converting back.
Off by default to convert it back to index.
:type keep_index_as_feature: bool
        :return: The given dataset converted back to the original type.
:rtype: numpy.ndarray or scipy.sparse.csr_matrix or pandas.DataFrame or pandas.Series
"""
if self._dataset_is_df:
if len(dataset.shape) == 1:
dataset = dataset.reshape(1, dataset.shape[0])
original_dtypes = self._original_dataset_with_type.dtypes
output_types = dict(original_dtypes)
dataframe = pd.DataFrame(dataset, columns=self._features)
if not self._default_index:
if keep_index_as_feature:
# Add the index name to type as feature dtype
for idx, name in enumerate(self._original_dataset_with_type.index.names):
level_values_dtype = self._original_dataset_with_type.index.get_level_values(idx).dtype
output_types.update({name: level_values_dtype})
else:
dataframe = dataframe.set_index(self._default_index_cols)
return dataframe.astype(output_types)
elif self._dataset_is_series:
return pd.Series(dataset)
elif self._dataset_is_batch:
if len(dataset.shape) == 1:
dataset = dataset.reshape(1, dataset.shape[0])
df = pd.DataFrame(dataset, columns=self._features)
tensor_slices = (dict(df), None)
tf_dataset = tf.data.Dataset.from_tensor_slices(tensor_slices)
batch_dataset = tf_dataset.batch(self._batch_size)
return batch_dataset
else:
return dataset
@property
def original_dataset(self):
"""Get the original dataset prior to performing any operations.
Note: if the original dataset was a pandas dataframe, this will return the numpy version.
:return: The original dataset.
:rtype: numpy.ndarray or scipy.sparse matrix
"""
return self._original_dataset
@property
def original_dataset_with_type(self):
"""Get the original typed dataset which could be a numpy array or pandas DataFrame or pandas Series.
:return: The original dataset.
:rtype: numpy.ndarray or pandas.DataFrame or pandas.Series or scipy.sparse matrix
"""
return self._original_dataset_with_type
@property
def num_features(self):
"""Get the number of features (columns) on the dataset.
:return: The number of features (columns) in the dataset.
:rtype: int
"""
evaluation_examples_temp = self._dataset
if isinstance(evaluation_examples_temp, pd.DataFrame):
evaluation_examples_temp = evaluation_examples_temp.values
if len(evaluation_examples_temp.shape) == 1:
return len(evaluation_examples_temp)
elif issparse(evaluation_examples_temp):
return evaluation_examples_temp.shape[1]
else:
return len(evaluation_examples_temp[0])
@property
def summary_dataset(self):
"""Get the summary dataset without any subsetting.
:return: The original dataset or None if summary was not computed.
:rtype: numpy.ndarray or scipy.sparse.csr_matrix
"""
return self._summary_dataset
def _set_default_index_cols(self, dataset):
if dataset.index.names is not None:
self._default_index_cols = dataset.index.names
def set_index(self):
"""Undo reset_index. Set index as feature on internal dataset to be an index again.
"""
if self._dataset_is_df:
dataset = self.typed_dataset
self._features = dataset.columns.values.tolist()
self._dataset = dataset.values
self._default_index = True
def reset_index(self):
"""Reset index to be part of the features on the dataset.
"""
dataset = self._original_dataset_with_type
if self._dataset_is_df:
self._default_index = pd.Index(np.arange(0, len(dataset))).equals(dataset.index)
reset_dataset = dataset
if not self._default_index:
self._set_default_index_cols(dataset)
reset_dataset = dataset.reset_index()
# Move index columns to the end of the dataframe to ensure
# index arguments are still valid to original dataset
dcols = reset_dataset.columns.tolist()
for default_index_col in self._default_index_cols:
dcols.insert(len(dcols), dcols.pop(dcols.index(default_index_col)))
reset_dataset = reset_dataset.reindex(columns=dcols)
self._features = reset_dataset.columns.values.tolist()
self._dataset = reset_dataset.values
def get_features(self, features=None, explain_subset=None, **kwargs):
"""Get the features of the dataset if None on current kwargs.
:return: The features of the dataset if currently None on kwargs.
:rtype: list
"""
if features is not None:
if explain_subset is not None:
return np.array(features)[explain_subset].tolist()
return features
if explain_subset is not None and self._features is not None:
return np.array(self._features)[explain_subset].tolist()
if self._features is None:
return list(range(self._dataset.shape[1]))
return self._features
def get_column_indexes(self, features, categorical_features):
"""Get the column indexes for the given column names.
:param features: The full list of existing column names.
:type features: list[str]
:param categorical_features: The list of categorical feature names to get indexes for.
:type categorical_features: list[str]
:return: The list of column indexes.
:rtype: list[int]
"""
return [features.index(categorical_feature) for categorical_feature in categorical_features]
def string_index(self, columns=None):
"""Indexes categorical string features on the dataset.
:param columns: Optional parameter specifying the subset of columns that may need to be string indexed.
:type columns: list
:return: The transformation steps to index the given dataset.
:rtype: sklearn.compose.ColumnTransformer
"""
if self._string_indexed:
return self._column_indexer
# Optimization so we don't redo this operation multiple times on the same dataset
self._string_indexed = True
# If the data was previously successfully summarized, then there are no
# categorical columns as it must be numeric.
# Also, if the dataset is sparse, we can assume there are no categorical strings
if str(type(self._dataset)).endswith(".DenseData'>") or issparse(self._dataset):
return None
# If the user doesn't have a newer version of scikit-learn with OrdinalEncoder, don't do encoding
try:
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
except ImportError:
return None
tmp_dataset = self._dataset
# Temporarily convert to pandas for easier and uniform string handling,
# only use top sampled rows to limit memory usage for string type test
if isinstance(self._dataset, np.ndarray):
tmp_dataset = pd.DataFrame(self._dataset[:SAMPLED_STRING_ROWS, :], dtype=self._dataset.dtype)
else:
tmp_dataset = tmp_dataset.iloc[:SAMPLED_STRING_ROWS]
categorical_col_names = list(np.array(list(tmp_dataset))[(tmp_dataset.applymap(type) == str).all(0)])
if categorical_col_names:
all_columns = tmp_dataset.columns
if columns is not None:
categorical_col_indices = \
[all_columns.get_loc(col_name) for col_name in categorical_col_names if col_name in columns]
else:
categorical_col_indices = [all_columns.get_loc(col_name) for col_name in categorical_col_names]
ordinal_enc = OrdinalEncoder()
ct = ColumnTransformer([('ord', ordinal_enc, categorical_col_indices)], remainder='drop')
string_indexes_dataset = ct.fit_transform(self._dataset)
# Inplace replacement of columns
# (danger: using remainder=passthrough with ColumnTransformer will change column order!)
for idx, categorical_col_index in enumerate(categorical_col_indices):
self._dataset[:, categorical_col_index] = string_indexes_dataset[:, idx]
self._column_indexer = ct
return self._column_indexer
def one_hot_encode(self, columns):
"""Indexes categorical string features on the dataset.
:param columns: Parameter specifying the subset of column indexes that may need to be one-hot-encoded.
:type columns: list[int]
:return: The transformation steps to one-hot-encode the given dataset.
:rtype: sklearn.preprocessing.OneHotEncoder
"""
if self._one_hot_encoded:
return self._one_hot_encoder
# Optimization so we don't redo this operation multiple times on the same dataset
self._one_hot_encoded = True
# If the data was previously successfully summarized, then there are no
# categorical columns as it must be numeric.
# Also, if the dataset is sparse, we can assume there are no categorical strings
if not columns or str(type(self._dataset)).endswith(".DenseData'>") or issparse(self._dataset):
return None
# If the user doesn't have a newer version of scikit-learn with OneHotEncoder, don't do encoding
try:
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
except ImportError:
return None
one_hot_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
self._one_hot_encoder = ColumnTransformer([('ord', one_hot_encoder, columns)], remainder='passthrough')
# Note this will change column order, the one hot encoded columns will be at the start and the
# rest of the columns at the end
self._dataset = self._one_hot_encoder.fit_transform(self._dataset.astype(float)).astype(float)
return self._one_hot_encoder
def timestamp_featurizer(self):
"""Featurizes the timestamp columns.
:return: The transformation steps to featurize the timestamp columns.
:rtype: ml_wrappers.DatasetWrapper
"""
if self._timestamp_featurized:
return self._timestamp_featurizer
# Optimization so we don't redo this operation multiple times on the same dataset
self._timestamp_featurized = True
# If the data was previously successfully summarized, then there are no
# categorical columns as it must be numeric.
# Also, if the dataset is sparse, we can assume there are no categorical strings
if str(type(self._dataset)).endswith(".DenseData'>") or issparse(self._dataset):
return None
typed_dataset_without_index = self.typed_wrapper_func(self._dataset, keep_index_as_feature=True)
self._timestamp_featurizer = CustomTimestampFeaturizer(self._features).fit(typed_dataset_without_index)
self._dataset = self._timestamp_featurizer.transform(self._dataset)
return self._timestamp_featurizer
def apply_indexer(self, column_indexer, bucket_unknown=False):
"""Indexes categorical string features on the dataset.
:param column_indexer: The transformation steps to index the given dataset.
:type column_indexer: sklearn.compose.ColumnTransformer
:param bucket_unknown: If true, buckets unknown values to separate categorical level.
:type bucket_unknown: bool
"""
if self._string_indexed or issparse(self._dataset):
return
name, ordinal_encoder, cols = column_indexer.transformers_[0]
all_categories = ordinal_encoder.categories_
def convert_cols(category_to_index, value, unknown):
if value in category_to_index:
index = category_to_index[value]
elif not bucket_unknown:
# Add new index on the fly - note the background data does NOT need to
# contain all possible category levels!
index = len(category_to_index) + 1
category_to_index[value] = index
else:
# Put all unknown indexes into a separate 'unknown' bucket
index = unknown
category_to_index[value] = index
return index
for idx, i in enumerate(cols):
categories_for_col = all_categories[idx]
category_to_index = dict(zip(categories_for_col, range(len(categories_for_col))))
unknown = len(category_to_index) + 1
self._dataset[:, i] = list(map(lambda x: convert_cols(category_to_index, x, unknown), self._dataset[:, i]))
# Ensure element types are float and not object
self._dataset = self._dataset.astype(float)
self._string_indexed = True
def apply_one_hot_encoder(self, one_hot_encoder):
"""One-hot-encode categorical string features on the dataset.
:param one_hot_encoder: The transformation steps to one-hot-encode the given dataset.
:type one_hot_encoder: sklearn.preprocessing.OneHotEncoder
"""
if self._one_hot_encoded or issparse(self._dataset):
return
self._dataset = one_hot_encoder.transform(self._dataset).astype(float)
self._one_hot_encoded = True
def apply_timestamp_featurizer(self, timestamp_featurizer):
"""Apply timestamp featurization on the dataset.
:param timestamp_featurizer: The transformation steps to featurize timestamps in the given dataset.
:type timestamp_featurizer: CustomTimestampFeaturizer
"""
if self._timestamp_featurized or issparse(self._dataset):
return
self._dataset = timestamp_featurizer.transform(self._dataset)
self._timestamp_featurized = True
def compute_summary(self, nclusters=10, use_gpu=False, **kwargs):
"""Summarizes the dataset if it hasn't been summarized yet."""
if self._summary_computed:
return
self._summary_dataset = _summarize_data(self._dataset, nclusters, use_gpu)
self._dataset = self._summary_dataset
self._summary_computed = True
def augment_data(self, max_num_of_augmentations=np.inf):
"""Augment the current dataset.
        :param max_num_of_augmentations: maximum number of times we stack permuted x to augment.
        :type max_num_of_augmentations: int
"""
self._dataset = _generate_augmented_data(self._dataset, max_num_of_augmentations=max_num_of_augmentations)
def take_subset(self, explain_subset):
"""Take a subset of the dataset if not done before.
:param explain_subset: A list of column indexes to take from the original dataset.
:type explain_subset: list
"""
if self._subset_taken:
return
# Edge case: Take the subset of the summary in this case,
# more optimal than recomputing the summary!
explain_subset = np.array(explain_subset)
self._dataset = self._dataset[:, explain_subset]
self._subset_taken = True
def _reduce_examples(self, max_dim_clustering=Defaults.MAX_DIM):
"""Reduces the dimensionality of the examples if dimensionality is higher than max_dim_clustering.
        If the dataset is sparse, we scale the data (without mean centering) and then run
        truncated SVD to reduce the number of features to max_dim_clustering. For a dense
        dataset, we scale the data and then run PCA to reduce the number of features to
max_dim_clustering.
This is used to get better clustering results in _find_k.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
"""
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.preprocessing import StandardScaler
num_cols = self._dataset.shape[1]
# Run PCA or SVD on input data and reduce to about MAX_DIM features prior to clustering
components = min(max_dim_clustering, num_cols)
reduced_examples = self._dataset
if components != num_cols:
if issparse(self._dataset):
module_logger.debug('Reducing sparse data with StandardScaler and TruncatedSVD')
normalized_examples = StandardScaler(with_mean=False).fit_transform(self._dataset)
reducer = TruncatedSVD(n_components=components)
else:
module_logger.debug('Reducing normal data with StandardScaler and PCA')
normalized_examples = StandardScaler().fit_transform(self._dataset)
reducer = PCA(n_components=components)
module_logger.info('reducing dimensionality to {0} components for clustering'.format(str(components)))
reduced_examples = reducer.fit_transform(normalized_examples)
return reduced_examples
def _find_k_kmeans(self, max_dim_clustering=Defaults.MAX_DIM):
"""Use k-means to downsample the examples.
        Starting from k_upper_bound, cuts k in half each time and runs k-means
        clustering on the examples. After each run, computes the
        silhouette score and stores the k with the highest silhouette score.
We use optimal k to determine how much to downsample the examples.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
"""
from math import ceil, isnan, log
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
reduced_examples = self._reduce_examples(max_dim_clustering)
num_rows = self._dataset.shape[0]
k_upper_bound = 2000
k_list = []
k = min(num_rows / 2, k_upper_bound)
for _ in range(int(ceil(log(num_rows, 2) - 7))):
k_list.append(int(k))
k /= 2
prev_highest_score = -1
prev_highest_index = 0
opt_k = int(k)
for k_index, k in enumerate(k_list):
module_logger.info('running KMeans with k: {}'.format(str(k)))
km = KMeans(n_clusters=k).fit(reduced_examples)
clusters = km.labels_
num_clusters = len(set(clusters))
k_too_big = num_clusters <= 1
if k_too_big or num_clusters == reduced_examples.shape[0]:
score = -1
else:
score = silhouette_score(reduced_examples, clusters)
if isnan(score):
score = -1
module_logger.info('KMeans silhouette score: {}'.format(str(score)))
# Find k with highest silhouette score for optimal clustering
if score >= prev_highest_score and not k_too_big:
prev_highest_score = score
prev_highest_index = k_index
opt_k = k_list[prev_highest_index]
module_logger.info('best silhouette score: {}'.format(str(prev_highest_score)))
module_logger.info('optimal k for KMeans: {}'.format(str(opt_k)))
return opt_k
def _find_k_hdbscan(self, max_dim_clustering=Defaults.MAX_DIM):
"""Use hdbscan to downsample the examples.
We use optimal k to determine how much to downsample the examples.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
"""
import hdbscan
num_rows = self._dataset.shape[0]
reduced_examples = self._reduce_examples(max_dim_clustering)
hdbs = hdbscan.HDBSCAN(min_cluster_size=2).fit(reduced_examples)
clusters = hdbs.labels_
opt_k = len(set(clusters))
clustering_threshold = 5
samples = opt_k * clustering_threshold
module_logger.info(('found optimal k for hdbscan: {},'
' will use clustering_threshold * k for sampling: {}').format(str(opt_k), str(samples)))
return min(samples, num_rows)
def sample(self, max_dim_clustering=Defaults.MAX_DIM, sampling_method=Defaults.HDBSCAN):
"""Sample the examples.
First does random downsampling to upper_bound rows,
then tries to find the optimal downsample based on how many clusters can be constructed
from the data. If sampling_method is hdbscan, uses hdbscan to cluster the
data and then downsamples to that number of clusters. If sampling_method is k-means,
uses different values of k, cutting in half each time, and chooses the k with highest
silhouette score to determine how much to downsample the data.
The danger of using only random downsampling is that we might downsample too much
or too little, so the clustering approach is a heuristic to give us some idea of
        how much we should downsample.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
:param sampling_method: Method to use for sampling, can be 'hdbscan' or 'kmeans'.
:type sampling_method: str
"""
from sklearn.utils import resample
# bounds are rough estimates that came from manual investigation
lower_bound = 200
upper_bound = 10000
num_rows = self._dataset.shape[0]
module_logger.info('sampling examples')
# If less than lower_bound rows, just return the full dataset
if num_rows < lower_bound:
return self._dataset
# If more than upper_bound rows, sample randomly
elif num_rows > upper_bound:
module_logger.info('randomly sampling to 10k rows')
self._dataset = resample(self._dataset, n_samples=upper_bound, random_state=7)
num_rows = upper_bound
if sampling_method == Defaults.HDBSCAN:
try:
opt_k = self._find_k_hdbscan(max_dim_clustering)
except Exception as ex:
module_logger.warning(('Failed to use hdbscan due to error: {}'
'\nEnsure hdbscan is installed with: pip install hdbscan').format(str(ex)))
opt_k = self._find_k_kmeans(max_dim_clustering)
else:
opt_k = self._find_k_kmeans(max_dim_clustering)
# Resample based on optimal number of clusters
if (opt_k < num_rows):
self._dataset = resample(self._dataset, n_samples=opt_k, random_state=7)
return self._dataset
def _clear(self):
"""Optimization for memory usage.
Clears all internal references so they can be garbage collected.
"""
if self._clear_references:
self._features = None
self._original_dataset_with_type = None
self._dataset_is_df = None
self._dataset_is_series = None
self._default_index_cols = None
self._default_index = None
self._dataset = None
self._original_dataset = None
self._summary_dataset = None
self._column_indexer = None
self._subset_taken = False
self._summary_computed = False
self._string_indexed = False
self._one_hot_encoded = False
self._one_hot_encoder = None
self._timestamp_featurized = False
self._timestamp_featurizer = None
|
(dataset, clear_references=False)
|
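A minimal sketch of wrapping a pandas DataFrame, assuming ml_wrappers is installed; the column names and values are made up for illustration.

import pandas as pd
from ml_wrappers import DatasetWrapper  # assumed top-level import of the class documented above

df = pd.DataFrame({"age": [25, 32, 47], "income": [40000, 52000, 61000]})  # hypothetical data
wrapped = DatasetWrapper(df)
print(wrapped.num_features)          # 2
print(type(wrapped.dataset))         # numpy.ndarray, the internal representation
round_trip = wrapped.typed_dataset   # converted back to a pandas DataFrame with the original dtypes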
54,940 |
ml_wrappers.dataset.dataset_wrapper
|
__init__
|
Initialize the dataset wrapper.
:param dataset: A matrix of feature vector examples (# examples x # features) for
initializing the explainer.
:type dataset: numpy.ndarray or pandas.DataFrame or pandas.Series or scipy.sparse.csr_matrix
or shap.DenseData or torch.Tensor or tensorflow.python.data.ops.dataset_ops.BatchDataset
:param clear_references: A memory optimization that clears all references after use in explainers.
:type clear_references: bool
|
def __init__(self, dataset, clear_references=False):
"""Initialize the dataset wrapper.
:param dataset: A matrix of feature vector examples (# examples x # features) for
initializing the explainer.
    :type dataset: numpy.ndarray or pandas.DataFrame or pandas.Series or scipy.sparse.csr_matrix
or shap.DenseData or torch.Tensor or tensorflow.python.data.ops.dataset_ops.BatchDataset
:param clear_references: A memory optimization that clears all references after use in explainers.
:type clear_references: bool
"""
if not isinstance(dataset, pd.DataFrame) and not isinstance(dataset, pd.Series) and \
not isinstance(dataset, np.ndarray) and not issparse(dataset) and \
not str(type(dataset)).endswith(".DenseData'>") and \
not str(type(dataset)).endswith("torch.Tensor'>") and \
not str(type(dataset)).endswith("BatchDataset'>"):
raise TypeError("Got type {0} which is not supported in DatasetWrapper".format(
type(dataset))
)
self._features = None
self._original_dataset_with_type = dataset
self._dataset_is_df = isinstance(dataset, pd.DataFrame)
self._dataset_is_series = isinstance(dataset, pd.Series)
self._dataset_is_batch = str(type(dataset)).endswith("BatchDataset'>")
self._default_index_cols = ['index']
self._default_index = True
if self._dataset_is_df:
self._features = dataset.columns.values.tolist()
if self._dataset_is_df or self._dataset_is_series:
dataset = dataset.values
elif self._dataset_is_batch:
dataset, features, size = _convert_batch_dataset_to_numpy(dataset)
self._features = features
self._batch_size = size
self._dataset = dataset
self._original_dataset = dataset
self._summary_dataset = None
self._column_indexer = None
self._subset_taken = False
self._summary_computed = False
self._string_indexed = False
self._one_hot_encoded = False
self._one_hot_encoder = None
self._timestamp_featurized = False
self._timestamp_featurizer = None
self._clear_references = clear_references
|
(self, dataset, clear_references=False)
|
54,941 |
ml_wrappers.dataset.dataset_wrapper
|
_clear
|
Optimization for memory usage.
Clears all internal references so they can be garbage collected.
|
def _clear(self):
"""Optimization for memory usage.
Clears all internal references so they can be garbage collected.
"""
if self._clear_references:
self._features = None
self._original_dataset_with_type = None
self._dataset_is_df = None
self._dataset_is_series = None
self._default_index_cols = None
self._default_index = None
self._dataset = None
self._original_dataset = None
self._summary_dataset = None
self._column_indexer = None
self._subset_taken = False
self._summary_computed = False
self._string_indexed = False
self._one_hot_encoded = False
self._one_hot_encoder = None
self._timestamp_featurized = False
self._timestamp_featurizer = None
|
(self)
|
54,942 |
ml_wrappers.dataset.dataset_wrapper
|
_find_k_hdbscan
|
Use hdbscan to downsample the examples.
We use optimal k to determine how much to downsample the examples.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
|
def _find_k_hdbscan(self, max_dim_clustering=Defaults.MAX_DIM):
"""Use hdbscan to downsample the examples.
We use optimal k to determine how much to downsample the examples.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
"""
import hdbscan
num_rows = self._dataset.shape[0]
reduced_examples = self._reduce_examples(max_dim_clustering)
hdbs = hdbscan.HDBSCAN(min_cluster_size=2).fit(reduced_examples)
clusters = hdbs.labels_
opt_k = len(set(clusters))
clustering_threshold = 5
samples = opt_k * clustering_threshold
module_logger.info(('found optimal k for hdbscan: {},'
' will use clustering_threshold * k for sampling: {}').format(str(opt_k), str(samples)))
return min(samples, num_rows)
|
(self, max_dim_clustering=50)
|
54,943 |
ml_wrappers.dataset.dataset_wrapper
|
_find_k_kmeans
|
Use k-means to downsample the examples.
Starting from k_upper_bound, cuts k in half each time and runs k-means
clustering on the examples. After each run, computes the
silhouette score and stores the k with the highest silhouette score.
We use optimal k to determine how much to downsample the examples.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
|
def _find_k_kmeans(self, max_dim_clustering=Defaults.MAX_DIM):
"""Use k-means to downsample the examples.
    Starting from k_upper_bound, cuts k in half each time and runs k-means
    clustering on the examples. After each run, computes the
    silhouette score and stores the k with the highest silhouette score.
We use optimal k to determine how much to downsample the examples.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
"""
from math import ceil, isnan, log
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
reduced_examples = self._reduce_examples(max_dim_clustering)
num_rows = self._dataset.shape[0]
k_upper_bound = 2000
k_list = []
k = min(num_rows / 2, k_upper_bound)
for _ in range(int(ceil(log(num_rows, 2) - 7))):
k_list.append(int(k))
k /= 2
prev_highest_score = -1
prev_highest_index = 0
opt_k = int(k)
for k_index, k in enumerate(k_list):
module_logger.info('running KMeans with k: {}'.format(str(k)))
km = KMeans(n_clusters=k).fit(reduced_examples)
clusters = km.labels_
num_clusters = len(set(clusters))
k_too_big = num_clusters <= 1
if k_too_big or num_clusters == reduced_examples.shape[0]:
score = -1
else:
score = silhouette_score(reduced_examples, clusters)
if isnan(score):
score = -1
module_logger.info('KMeans silhouette score: {}'.format(str(score)))
# Find k with highest silhouette score for optimal clustering
if score >= prev_highest_score and not k_too_big:
prev_highest_score = score
prev_highest_index = k_index
opt_k = k_list[prev_highest_index]
module_logger.info('best silhouette score: {}'.format(str(prev_highest_score)))
module_logger.info('optimal k for KMeans: {}'.format(str(opt_k)))
return opt_k
|
(self, max_dim_clustering=50)
|
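A standalone sketch of the silhouette-score heuristic described above, run on synthetic blobs rather than the wrapper's internal data path:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(loc=c, size=(50, 3)) for c in (0.0, 5.0, 10.0)])  # three synthetic clusters
best_k, best_score = 2, -1.0
for k in (2, 3, 4, 8):
    labels = KMeans(n_clusters=k, n_init=10).fit(X).labels_
    score = silhouette_score(X, labels)
    if score > best_score:
        best_k, best_score = k, score
print(best_k)  # expected to pick 3 for this data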
54,944 |
ml_wrappers.dataset.dataset_wrapper
|
_reduce_examples
|
Reduces the dimensionality of the examples if dimensionality is higher than max_dim_clustering.
If the dataset is sparse, we scale the data (without mean centering) and then run
truncated SVD to reduce the number of features to max_dim_clustering. For a dense
dataset, we scale the data and then run PCA to reduce the number of features to
max_dim_clustering.
This is used to get better clustering results in _find_k.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
|
def _reduce_examples(self, max_dim_clustering=Defaults.MAX_DIM):
"""Reduces the dimensionality of the examples if dimensionality is higher than max_dim_clustering.
    If the dataset is sparse, we scale the data (without mean centering) and then run
    truncated SVD to reduce the number of features to max_dim_clustering. For a dense
    dataset, we scale the data and then run PCA to reduce the number of features to
max_dim_clustering.
This is used to get better clustering results in _find_k.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
"""
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.preprocessing import StandardScaler
num_cols = self._dataset.shape[1]
# Run PCA or SVD on input data and reduce to about MAX_DIM features prior to clustering
components = min(max_dim_clustering, num_cols)
reduced_examples = self._dataset
if components != num_cols:
if issparse(self._dataset):
module_logger.debug('Reducing sparse data with StandardScaler and TruncatedSVD')
normalized_examples = StandardScaler(with_mean=False).fit_transform(self._dataset)
reducer = TruncatedSVD(n_components=components)
else:
module_logger.debug('Reducing normal data with StandardScaler and PCA')
normalized_examples = StandardScaler().fit_transform(self._dataset)
reducer = PCA(n_components=components)
module_logger.info('reducing dimensionality to {0} components for clustering'.format(str(components)))
reduced_examples = reducer.fit_transform(normalized_examples)
return reduced_examples
|
(self, max_dim_clustering=50)
|
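A rough sketch of the scale-then-reduce pattern described above, using TruncatedSVD for sparse input and PCA otherwise (illustrative only, not the library's internal function):

from scipy.sparse import issparse
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.preprocessing import StandardScaler

def reduce_for_clustering(X, n_components=50):
    """Scale X and reduce it to at most n_components columns for clustering."""
    n_components = min(n_components, X.shape[1])
    if n_components == X.shape[1]:
        return X
    if issparse(X):
        X_scaled = StandardScaler(with_mean=False).fit_transform(X)  # no centering on sparse data
        reducer = TruncatedSVD(n_components=n_components)
    else:
        X_scaled = StandardScaler().fit_transform(X)
        reducer = PCA(n_components=n_components)
    return reducer.fit_transform(X_scaled)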
54,945 |
ml_wrappers.dataset.dataset_wrapper
|
_set_default_index_cols
| null |
def _set_default_index_cols(self, dataset):
if dataset.index.names is not None:
self._default_index_cols = dataset.index.names
|
(self, dataset)
|
54,946 |
ml_wrappers.dataset.dataset_wrapper
|
apply_indexer
|
Indexes categorical string features on the dataset.
:param column_indexer: The transformation steps to index the given dataset.
:type column_indexer: sklearn.compose.ColumnTransformer
:param bucket_unknown: If true, buckets unknown values to separate categorical level.
:type bucket_unknown: bool
|
def apply_indexer(self, column_indexer, bucket_unknown=False):
"""Indexes categorical string features on the dataset.
:param column_indexer: The transformation steps to index the given dataset.
:type column_indexer: sklearn.compose.ColumnTransformer
:param bucket_unknown: If true, buckets unknown values to separate categorical level.
:type bucket_unknown: bool
"""
if self._string_indexed or issparse(self._dataset):
return
name, ordinal_encoder, cols = column_indexer.transformers_[0]
all_categories = ordinal_encoder.categories_
def convert_cols(category_to_index, value, unknown):
if value in category_to_index:
index = category_to_index[value]
elif not bucket_unknown:
# Add new index on the fly - note the background data does NOT need to
# contain all possible category levels!
index = len(category_to_index) + 1
category_to_index[value] = index
else:
# Put all unknown indexes into a separate 'unknown' bucket
index = unknown
category_to_index[value] = index
return index
for idx, i in enumerate(cols):
categories_for_col = all_categories[idx]
category_to_index = dict(zip(categories_for_col, range(len(categories_for_col))))
unknown = len(category_to_index) + 1
self._dataset[:, i] = list(map(lambda x: convert_cols(category_to_index, x, unknown), self._dataset[:, i]))
# Ensure element types are float and not object
self._dataset = self._dataset.astype(float)
self._string_indexed = True
|
(self, column_indexer, bucket_unknown=False)
|
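A tiny standalone sketch of the unknown-handling logic inside convert_cols above (the category mapping and values are purely illustrative):

# Known categories map to their fitted index; unseen values either get a fresh
# index appended on the fly or all share one 'unknown' bucket.
category_to_index = {'red': 0, 'blue': 1}
unknown = len(category_to_index) + 1

def convert(value, bucket_unknown=False):
    if value in category_to_index:
        return category_to_index[value]
    index = unknown if bucket_unknown else len(category_to_index) + 1
    category_to_index[value] = index
    return index

convert('green')                        # new level indexed on the fly
convert('purple', bucket_unknown=True)  # routed to the shared unknown bucket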
54,947 |
ml_wrappers.dataset.dataset_wrapper
|
apply_one_hot_encoder
|
One-hot-encode categorical string features on the dataset.
:param one_hot_encoder: The transformation steps to one-hot-encode the given dataset.
:type one_hot_encoder: sklearn.preprocessing.OneHotEncoder
|
def apply_one_hot_encoder(self, one_hot_encoder):
"""One-hot-encode categorical string features on the dataset.
:param one_hot_encoder: The transformation steps to one-hot-encode the given dataset.
:type one_hot_encoder: sklearn.preprocessing.OneHotEncoder
"""
if self._one_hot_encoded or issparse(self._dataset):
return
self._dataset = one_hot_encoder.transform(self._dataset).astype(float)
self._one_hot_encoded = True
|
(self, one_hot_encoder)
|
54,948 |
ml_wrappers.dataset.dataset_wrapper
|
apply_timestamp_featurizer
|
Apply timestamp featurization on the dataset.
:param timestamp_featurizer: The transformation steps to featurize timestamps in the given dataset.
:type timestamp_featurizer: CustomTimestampFeaturizer
|
def apply_timestamp_featurizer(self, timestamp_featurizer):
"""Apply timestamp featurization on the dataset.
:param timestamp_featurizer: The transformation steps to featurize timestamps in the given dataset.
:type timestamp_featurizer: CustomTimestampFeaturizer
"""
if self._timestamp_featurized or issparse(self._dataset):
return
self._dataset = timestamp_featurizer.transform(self._dataset)
self._timestamp_featurized = True
|
(self, timestamp_featurizer)
|
54,949 |
ml_wrappers.dataset.dataset_wrapper
|
augment_data
|
Augment the current dataset.
:param max_num_of_augmentations: maximum number of times the permuted data is stacked to augment.
:type max_num_of_augmentations: int
|
def augment_data(self, max_num_of_augmentations=np.inf):
"""Augment the current dataset.
:param max_num_of_augmentations: maximum number of times the permuted data is stacked to augment.
:type max_num_of_augmentations: int
"""
self._dataset = _generate_augmented_data(self._dataset, max_num_of_augmentations=max_num_of_augmentations)
|
(self, max_num_of_augmentations=inf)
|
54,950 |
ml_wrappers.dataset.dataset_wrapper
|
compute_summary
|
Summarizes the dataset if it hasn't been summarized yet.
|
def compute_summary(self, nclusters=10, use_gpu=False, **kwargs):
"""Summarizes the dataset if it hasn't been summarized yet."""
if self._summary_computed:
return
self._summary_dataset = _summarize_data(self._dataset, nclusters, use_gpu)
self._dataset = self._summary_dataset
self._summary_computed = True
|
(self, nclusters=10, use_gpu=False, **kwargs)
|
54,951 |
ml_wrappers.dataset.dataset_wrapper
|
get_column_indexes
|
Get the column indexes for the given column names.
:param features: The full list of existing column names.
:type features: list[str]
:param categorical_features: The list of categorical feature names to get indexes for.
:type categorical_features: list[str]
:return: The list of column indexes.
:rtype: list[int]
|
def get_column_indexes(self, features, categorical_features):
"""Get the column indexes for the given column names.
:param features: The full list of existing column names.
:type features: list[str]
:param categorical_features: The list of categorical feature names to get indexes for.
:type categorical_features: list[str]
:return: The list of column indexes.
:rtype: list[int]
"""
return [features.index(categorical_feature) for categorical_feature in categorical_features]
|
(self, features, categorical_features)
|
54,952 |
ml_wrappers.dataset.dataset_wrapper
|
get_features
|
Get the features of the dataset if None on current kwargs.
:return: The features of the dataset if currently None on kwargs.
:rtype: list
|
def get_features(self, features=None, explain_subset=None, **kwargs):
"""Get the features of the dataset if None on current kwargs.
:return: The features of the dataset if currently None on kwargs.
:rtype: list
"""
if features is not None:
if explain_subset is not None:
return np.array(features)[explain_subset].tolist()
return features
if explain_subset is not None and self._features is not None:
return np.array(self._features)[explain_subset].tolist()
if self._features is None:
return list(range(self._dataset.shape[1]))
return self._features
|
(self, features=None, explain_subset=None, **kwargs)
|
54,953 |
ml_wrappers.dataset.dataset_wrapper
|
one_hot_encode
|
Indexes categorical string features on the dataset.
:param columns: Parameter specifying the subset of column indexes that may need to be one-hot-encoded.
:type columns: list[int]
:return: The transformation steps to one-hot-encode the given dataset.
:rtype: sklearn.preprocessing.OneHotEncoder
|
def one_hot_encode(self, columns):
"""Indexes categorical string features on the dataset.
:param columns: Parameter specifying the subset of column indexes that may need to be one-hot-encoded.
:type columns: list[int]
:return: The transformation steps to one-hot-encode the given dataset.
:rtype: sklearn.preprocessing.OneHotEncoder
"""
if self._one_hot_encoded:
return self._one_hot_encoder
# Optimization so we don't redo this operation multiple times on the same dataset
self._one_hot_encoded = True
# If the data was previously successfully summarized, then there are no
# categorical columns as it must be numeric.
# Also, if the dataset is sparse, we can assume there are no categorical strings
if not columns or str(type(self._dataset)).endswith(".DenseData'>") or issparse(self._dataset):
return None
# If the user doesn't have a newer version of scikit-learn with OneHotEncoder, don't do encoding
try:
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
except ImportError:
return None
one_hot_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
self._one_hot_encoder = ColumnTransformer([('ord', one_hot_encoder, columns)], remainder='passthrough')
# Note this will change column order, the one hot encoded columns will be at the start and the
# rest of the columns at the end
self._dataset = self._one_hot_encoder.fit_transform(self._dataset.astype(float)).astype(float)
return self._one_hot_encoder
|
(self, columns)
|
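A standalone sketch of the same encoding pattern: OneHotEncoder wrapped in a ColumnTransformer with remainder='passthrough' (the array is made up; note that newer scikit-learn releases rename the sparse keyword used in the library code above to sparse_output):

# Illustrative only: one-hot-encode column 0 and pass the other column through.
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

data = np.array([[0.0, 1.5], [1.0, 2.5], [2.0, 3.5], [0.0, 4.5]])
encoder = ColumnTransformer(
    [('ord', OneHotEncoder(handle_unknown='ignore'), [0])],
    remainder='passthrough')
# As in the method above, the encoded columns come first and the passthrough
# columns are moved to the end.
encoded = encoder.fit_transform(data)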
54,954 |
ml_wrappers.dataset.dataset_wrapper
|
reset_index
|
Reset index to be part of the features on the dataset.
|
def reset_index(self):
"""Reset index to be part of the features on the dataset.
"""
dataset = self._original_dataset_with_type
if self._dataset_is_df:
self._default_index = pd.Index(np.arange(0, len(dataset))).equals(dataset.index)
reset_dataset = dataset
if not self._default_index:
self._set_default_index_cols(dataset)
reset_dataset = dataset.reset_index()
# Move index columns to the end of the dataframe to ensure
# index arguments are still valid to original dataset
dcols = reset_dataset.columns.tolist()
for default_index_col in self._default_index_cols:
dcols.insert(len(dcols), dcols.pop(dcols.index(default_index_col)))
reset_dataset = reset_dataset.reindex(columns=dcols)
self._features = reset_dataset.columns.values.tolist()
self._dataset = reset_dataset.values
|
(self)
|
54,955 |
ml_wrappers.dataset.dataset_wrapper
|
sample
|
Sample the examples.
First does random downsampling to upper_bound rows,
then tries to find the optimal downsample based on how many clusters can be constructed
from the data. If sampling_method is hdbscan, uses hdbscan to cluster the
data and then downsamples to that number of clusters. If sampling_method is k-means,
uses different values of k, cutting in half each time, and chooses the k with highest
silhouette score to determine how much to downsample the data.
The danger of using only random downsampling is that we might downsample too much
or too little, so the clustering approach is a heuristic to give us some idea of
how much we should downsample to.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
:param sampling_method: Method to use for sampling, can be 'hdbscan' or 'kmeans'.
:type sampling_method: str
|
def sample(self, max_dim_clustering=Defaults.MAX_DIM, sampling_method=Defaults.HDBSCAN):
"""Sample the examples.
First does random downsampling to upper_bound rows,
then tries to find the optimal downsample based on how many clusters can be constructed
from the data. If sampling_method is hdbscan, uses hdbscan to cluster the
data and then downsamples to that number of clusters. If sampling_method is k-means,
uses different values of k, cutting in half each time, and chooses the k with highest
silhouette score to determine how much to downsample the data.
The danger of using only random downsampling is that we might downsample too much
or too little, so the clustering approach is a heuristic to give us some idea of
how much we should downsample to.
:param max_dim_clustering: Dimensionality threshold for performing reduction.
:type max_dim_clustering: int
:param sampling_method: Method to use for sampling, can be 'hdbscan' or 'kmeans'.
:type sampling_method: str
"""
from sklearn.utils import resample
# bounds are rough estimates that came from manual investigation
lower_bound = 200
upper_bound = 10000
num_rows = self._dataset.shape[0]
module_logger.info('sampling examples')
# If less than lower_bound rows, just return the full dataset
if num_rows < lower_bound:
return self._dataset
# If more than upper_bound rows, sample randomly
elif num_rows > upper_bound:
module_logger.info('randomly sampling to 10k rows')
self._dataset = resample(self._dataset, n_samples=upper_bound, random_state=7)
num_rows = upper_bound
if sampling_method == Defaults.HDBSCAN:
try:
opt_k = self._find_k_hdbscan(max_dim_clustering)
except Exception as ex:
module_logger.warning(('Failed to use hdbscan due to error: {}'
'\nEnsure hdbscan is installed with: pip install hdbscan').format(str(ex)))
opt_k = self._find_k_kmeans(max_dim_clustering)
else:
opt_k = self._find_k_kmeans(max_dim_clustering)
# Resample based on optimal number of clusters
if (opt_k < num_rows):
self._dataset = resample(self._dataset, n_samples=opt_k, random_state=7)
return self._dataset
|
(self, max_dim_clustering=50, sampling_method='hdbscan')
|
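A simplified sketch of the k-means branch of this heuristic: halve k repeatedly, keep the k with the best silhouette score, then resample to that many rows (all values are illustrative; the real logic lives in _find_k_kmeans/_find_k_hdbscan, which are not shown in this record):

# Illustrative downsampling heuristic on random data.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import resample

def downsample_by_silhouette(data, random_state=7):
    k = data.shape[0] // 2
    best_k, best_score = k, -1.0
    while k >= 2:
        labels = KMeans(n_clusters=k, n_init=10, random_state=random_state).fit_predict(data)
        score = silhouette_score(data, labels)
        if score > best_score:
            best_k, best_score = k, score
        k //= 2
    return resample(data, n_samples=best_k, random_state=random_state)

sampled = downsample_by_silhouette(np.random.rand(400, 5))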
54,956 |
ml_wrappers.dataset.dataset_wrapper
|
set_index
|
Undo reset_index. Set index as feature on internal dataset to be an index again.
|
def set_index(self):
"""Undo reset_index. Set index as feature on internal dataset to be an index again.
"""
if self._dataset_is_df:
dataset = self.typed_dataset
self._features = dataset.columns.values.tolist()
self._dataset = dataset.values
self._default_index = True
|
(self)
|
54,957 |
ml_wrappers.dataset.dataset_wrapper
|
string_index
|
Indexes categorical string features on the dataset.
:param columns: Optional parameter specifying the subset of columns that may need to be string indexed.
:type columns: list
:return: The transformation steps to index the given dataset.
:rtype: sklearn.compose.ColumnTransformer
|
def string_index(self, columns=None):
"""Indexes categorical string features on the dataset.
:param columns: Optional parameter specifying the subset of columns that may need to be string indexed.
:type columns: list
:return: The transformation steps to index the given dataset.
:rtype: sklearn.compose.ColumnTransformer
"""
if self._string_indexed:
return self._column_indexer
# Optimization so we don't redo this operation multiple times on the same dataset
self._string_indexed = True
# If the data was previously successfully summarized, then there are no
# categorical columns as it must be numeric.
# Also, if the dataset is sparse, we can assume there are no categorical strings
if str(type(self._dataset)).endswith(".DenseData'>") or issparse(self._dataset):
return None
# If the user doesn't have a newer version of scikit-learn with OrdinalEncoder, don't do encoding
try:
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
except ImportError:
return None
tmp_dataset = self._dataset
# Temporarily convert to pandas for easier and uniform string handling,
# only use top sampled rows to limit memory usage for string type test
if isinstance(self._dataset, np.ndarray):
tmp_dataset = pd.DataFrame(self._dataset[:SAMPLED_STRING_ROWS, :], dtype=self._dataset.dtype)
else:
tmp_dataset = tmp_dataset.iloc[:SAMPLED_STRING_ROWS]
categorical_col_names = list(np.array(list(tmp_dataset))[(tmp_dataset.applymap(type) == str).all(0)])
if categorical_col_names:
all_columns = tmp_dataset.columns
if columns is not None:
categorical_col_indices = \
[all_columns.get_loc(col_name) for col_name in categorical_col_names if col_name in columns]
else:
categorical_col_indices = [all_columns.get_loc(col_name) for col_name in categorical_col_names]
ordinal_enc = OrdinalEncoder()
ct = ColumnTransformer([('ord', ordinal_enc, categorical_col_indices)], remainder='drop')
string_indexes_dataset = ct.fit_transform(self._dataset)
# Inplace replacement of columns
# (danger: using remainder=passthrough with ColumnTransformer will change column order!)
for idx, categorical_col_index in enumerate(categorical_col_indices):
self._dataset[:, categorical_col_index] = string_indexes_dataset[:, idx]
self._column_indexer = ct
return self._column_indexer
|
(self, columns=None)
|
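A standalone sketch mirroring the indexing logic above: ordinal-encode the string columns of an object array in place, keeping the original column order (the array and column choice are made up):

# Illustrative only: column 0 holds categorical strings, column 1 is numeric.
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder

data = np.array([['red', 1.0], ['blue', 2.0], ['red', 3.0]], dtype=object)
categorical_col_indices = [0]
ct = ColumnTransformer([('ord', OrdinalEncoder(), categorical_col_indices)],
                       remainder='drop')
indexed = ct.fit_transform(data)
# In-place replacement preserves column order; remainder='passthrough'
# would reorder the columns instead (the danger noted in the code above).
for out_idx, col_idx in enumerate(categorical_col_indices):
    data[:, col_idx] = indexed[:, out_idx]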
54,958 |
ml_wrappers.dataset.dataset_wrapper
|
take_subset
|
Take a subset of the dataset if not done before.
:param explain_subset: A list of column indexes to take from the original dataset.
:type explain_subset: list
|
def take_subset(self, explain_subset):
"""Take a subset of the dataset if not done before.
:param explain_subset: A list of column indexes to take from the original dataset.
:type explain_subset: list
"""
if self._subset_taken:
return
# Edge case: Take the subset of the summary in this case,
# more optimal than recomputing the summary!
explain_subset = np.array(explain_subset)
self._dataset = self._dataset[:, explain_subset]
self._subset_taken = True
|
(self, explain_subset)
|
54,959 |
ml_wrappers.dataset.dataset_wrapper
|
timestamp_featurizer
|
Featurizes the timestamp columns.
:return: The transformation steps to featurize the timestamp columns.
:rtype: ml_wrappers.DatasetWrapper
|
def timestamp_featurizer(self):
"""Featurizes the timestamp columns.
:return: The transformation steps to featurize the timestamp columns.
:rtype: ml_wrappers.DatasetWrapper
"""
if self._timestamp_featurized:
return self._timestamp_featurizer
# Optimization so we don't redo this operation multiple times on the same dataset
self._timestamp_featurized = True
# If the data was previously successfully summarized, then there are no
# categorical columns as it must be numeric.
# Also, if the dataset is sparse, we can assume there are no categorical strings
if str(type(self._dataset)).endswith(".DenseData'>") or issparse(self._dataset):
return None
typed_dataset_without_index = self.typed_wrapper_func(self._dataset, keep_index_as_feature=True)
self._timestamp_featurizer = CustomTimestampFeaturizer(self._features).fit(typed_dataset_without_index)
self._dataset = self._timestamp_featurizer.transform(self._dataset)
return self._timestamp_featurizer
|
(self)
|
54,960 |
ml_wrappers.dataset.dataset_wrapper
|
typed_wrapper_func
|
Get a wrapper function to convert the dataset to the original type, pandas DataFrame or Series.
:param dataset: The dataset to convert to original type.
:type dataset: numpy.ndarray or scipy.sparse.csr_matrix
:param keep_index_as_feature: Whether to keep the index as a feature when converting back.
Off by default to convert it back to index.
:type keep_index_as_feature: bool
:return: A wrapper function for a given dataset to convert to original type.
:rtype: numpy.ndarray or scipy.sparse.csr_matrix or pandas.DataFrame or pandas.Series
|
def typed_wrapper_func(self, dataset, keep_index_as_feature=False):
"""Get a wrapper function to convert the dataset to the original type, pandas DataFrame or Series.
:param dataset: The dataset to convert to original type.
:type dataset: numpy.ndarray or scipy.sparse.csr_matrix
:param keep_index_as_feature: Whether to keep the index as a feature when converting back.
Off by default to convert it back to index.
:type keep_index_as_feature: bool
:return: A wrapper function for a given dataset to convert to original type.
:rtype: numpy.ndarray or scipy.sparse.csr_matrix or pandas.DataFrame or pandas.Series
"""
if self._dataset_is_df:
if len(dataset.shape) == 1:
dataset = dataset.reshape(1, dataset.shape[0])
original_dtypes = self._original_dataset_with_type.dtypes
output_types = dict(original_dtypes)
dataframe = pd.DataFrame(dataset, columns=self._features)
if not self._default_index:
if keep_index_as_feature:
# Add the index name to type as feature dtype
for idx, name in enumerate(self._original_dataset_with_type.index.names):
level_values_dtype = self._original_dataset_with_type.index.get_level_values(idx).dtype
output_types.update({name: level_values_dtype})
else:
dataframe = dataframe.set_index(self._default_index_cols)
return dataframe.astype(output_types)
elif self._dataset_is_series:
return pd.Series(dataset)
elif self._dataset_is_batch:
if len(dataset.shape) == 1:
dataset = dataset.reshape(1, dataset.shape[0])
df = pd.DataFrame(dataset, columns=self._features)
tensor_slices = (dict(df), None)
tf_dataset = tf.data.Dataset.from_tensor_slices(tensor_slices)
batch_dataset = tf_dataset.batch(self._batch_size)
return batch_dataset
else:
return dataset
|
(self, dataset, keep_index_as_feature=False)
|
54,967 |
ml_wrappers.model.model_wrapper
|
wrap_model
|
If needed, wraps the model in a common API based on model task and
prediction function contract.
:param model: The model to evaluate on the examples.
:type model: model with a predict or predict_proba function.
:param examples: The model evaluation examples.
Note the examples will be wrapped in a DatasetWrapper if not
already wrapped on input.
:type examples: ml_wrappers.DatasetWrapper or numpy.ndarray
or pandas.DataFrame or pandas.Series or scipy.sparse.csr_matrix
or shap.DenseData or torch.Tensor
:param model_task: Optional parameter to specify whether the model
is a classification or regression model.
In most cases, the type of the model can be inferred
based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional
array, while a regressor has a predict method and
outputs a 1 dimensional array.
:param classes: optional parameter specifying a list of class names in
the dataset
:type classes: list or np.ndarray
:param num_classes: optional parameter specifying the number of classes in
the dataset
:type num_classes: int
:type model_task: str
:param device: optional parameter specifying the device to move the model
to. If not specified, then cpu is the default
:type device: str, for instance: 'cpu', 'cuda'
:return: The wrapper model.
:rtype: model
|
def wrap_model(model, examples, model_task: str = ModelTask.UNKNOWN,
num_classes: int = None, classes: Union[list, np.array] = None,
device=Device.AUTO.value):
"""If needed, wraps the model in a common API based on model task and
prediction function contract.
:param model: The model to evaluate on the examples.
:type model: model with a predict or predict_proba function.
:param examples: The model evaluation examples.
Note the examples will be wrapped in a DatasetWrapper if not
already wrapped on input.
:type examples: ml_wrappers.DatasetWrapper or numpy.ndarray
or pandas.DataFrame or pandas.Series or scipy.sparse.csr_matrix
or shap.DenseData or torch.Tensor
:param model_task: Optional parameter to specify whether the model
is a classification or regression model.
In most cases, the type of the model can be inferred
based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional
array, while a regressor has a predict method and
outputs a 1 dimensional array.
:param classes: optional parameter specifying a list of class names in
the dataset
:type classes: list or np.ndarray
:param num_classes: optional parameter specifying the number of classes in
the dataset
:type num_classes: int
:type model_task: str
:param device: optional parameter specifying the device to move the model
to. If not specified, then cpu is the default
:type device: str, for instance: 'cpu', 'cuda'
:return: The wrapper model.
:rtype: model
"""
if model_task == ModelTask.UNKNOWN and _is_transformers_pipeline(model):
# TODO: can we also dynamically figure out the task if it was
# originally unknown for text scenarios?
raise ValueError("ModelTask must be specified for text-based models")
if model_task in text_model_tasks:
return _wrap_text_model(model, examples, model_task, False)[0]
if model_task in image_model_tasks:
return _wrap_image_model(model, examples, model_task,
False, num_classes, classes,
device)[0]
return _wrap_model(model, examples, model_task, False)[0]
|
(model, examples, model_task: str = <ModelTask.UNKNOWN: 'unknown'>, num_classes: Optional[int] = None, classes: Union[list, <built-in function array>, NoneType] = None, device='auto')
|
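A hedged usage sketch for tabular data: wrap a scikit-learn classifier so downstream consumers see a uniform predict/predict_proba contract (the toy data are invented, and the top-level import path is an assumption; this record documents ml_wrappers.model.model_wrapper):

# Hedged sketch with a toy classifier; model_task is inferred for tabular models.
import numpy as np
from sklearn.linear_model import LogisticRegression
from ml_wrappers import wrap_model  # assumed top-level re-export

X = np.random.rand(50, 4)
y = np.random.randint(0, 2, 50)
model = LogisticRegression().fit(X, y)

wrapped = wrap_model(model, X)
proba = wrapped.predict_proba(X[:5])  # uniform 2-D probability output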
54,968 |
scrypt.scrypt
|
decrypt
|
Decrypt a string using a password.
Notes for Python 2:
- `input` and `password` must be str instances
- The result will be a str instance
- The encoding parameter is ignored
Notes for Python 3:
- `input` and `password` can be both str and bytes. If they are str
instances, they will be encoded with utf-8. `input` *should*
really be a bytes instance, since that's what `encrypt` returns.
- The result will be a str instance encoded with `encoding`.
If encoding=None, the result will be a bytes instance.
Exceptions raised:
- TypeError on invalid input
- scrypt.error if decryption failed
For more information on the `maxtime`, `maxmem`, and `maxmemfrac`
parameters, see the scrypt documentation.
|
def decrypt(
input,
password,
maxtime=MAXTIME_DEFAULT,
maxmem=MAXMEM_DEFAULT,
maxmemfrac=MAXMEMFRAC_DEFAULT,
encoding='utf-8',
):
"""Decrypt a string using a password.
Notes for Python 2:
- `input` and `password` must be str instances
- The result will be a str instance
- The encoding parameter is ignored
Notes for Python 3:
- `input` and `password` can be both str and bytes. If they are str
instances, they will be encoded with utf-8. `input` *should*
really be a bytes instance, since that's what `encrypt` returns.
- The result will be a str instance encoded with `encoding`.
If encoding=None, the result will be a bytes instance.
Exceptions raised:
- TypeError on invalid input
- scrypt.error if decryption failed
For more information on the `maxtime`, `maxmem`, and `maxmemfrac`
parameters, see the scrypt documentation.
"""
outbuf = create_string_buffer(len(input))
outbuflen = pointer(c_size_t(0))
input = _ensure_bytes(input)
password = _ensure_bytes(password)
# verbose and force are set to zero
result = _scryptdec_buf(
input,
len(input),
outbuf,
outbuflen,
password,
len(password),
maxmem,
maxmemfrac,
maxtime,
0,
0,
)
if result:
raise error(result)
out_bytes = outbuf.raw[: outbuflen.contents.value]
if encoding is None:
return out_bytes
return str(out_bytes, encoding)
|
(input, password, maxtime=300.0, maxmem=0, maxmemfrac=0.5, encoding='utf-8')
|
54,969 |
scrypt.scrypt
|
encrypt
|
Encrypt a string using a password.
The resulting data will have len = len(input) + 128.
Notes for Python 2:
- `input` and `password` must be str instances
- The result will be a str instance
Notes for Python 3:
- `input` and `password` can be both str and bytes. If they are str
instances, they will be encoded with utf-8
- The result will be a bytes instance
Exceptions raised:
- TypeError on invalid input
- scrypt.error if encryption failed
For more information on the `maxtime`, `maxmem`, and `maxmemfrac`
parameters, see the scrypt documentation.
|
def encrypt(
input,
password,
maxtime=MAXTIME_DEFAULT_ENC,
maxmem=MAXMEM_DEFAULT,
maxmemfrac=MAXMEMFRAC_DEFAULT,
):
"""Encrypt a string using a password.
The resulting data will have len = len(input) + 128.
Notes for Python 2:
- `input` and `password` must be str instances
- The result will be a str instance
Notes for Python 3:
- `input` and `password` can be both str and bytes. If they are str
instances, they will be encoded with utf-8
- The result will be a bytes instance
Exceptions raised:
- TypeError on invalid input
- scrypt.error if encryption failed
For more information on the `maxtime`, `maxmem`, and `maxmemfrac`
parameters, see the scrypt documentation.
"""
input = _ensure_bytes(input)
password = _ensure_bytes(password)
outbuf = create_string_buffer(len(input) + 128)
# verbose is set to zero
result = _scryptenc_buf(
input,
len(input),
outbuf,
password,
len(password),
maxmem,
maxmemfrac,
maxtime,
0,
)
if result:
raise error(result)
return outbuf.raw
|
(input, password, maxtime=5.0, maxmem=0, maxmemfrac=0.5)
|
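A small roundtrip example for the two functions above (maxtime is lowered from the defaults so the example finishes quickly; the plaintext and password are illustrative):

# Encrypt with a password, then decrypt with the same password.
import scrypt

ciphertext = scrypt.encrypt('hello world', 'password', maxtime=0.1)
plaintext = scrypt.decrypt(ciphertext, 'password', maxtime=0.5)
assert plaintext == 'hello world'
# A wrong password raises scrypt.error rather than returning garbage.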
54,970 |
scrypt.scrypt
|
error
| null |
class error(Exception):
def __init__(self, scrypt_code):
if isinstance(scrypt_code, int):
self._scrypt_code = scrypt_code
super().__init__(ERROR_MESSAGES[scrypt_code])
else:
self._scrypt_code = -1
super().__init__(scrypt_code)
|
(scrypt_code)
|
54,971 |
scrypt.scrypt
|
__init__
| null |
def __init__(self, scrypt_code):
if isinstance(scrypt_code, int):
self._scrypt_code = scrypt_code
super().__init__(ERROR_MESSAGES[scrypt_code])
else:
self._scrypt_code = -1
super().__init__(scrypt_code)
|
(self, scrypt_code)
|
54,972 |
scrypt.scrypt
|
hash
|
Compute scrypt(password, salt, N, r, p, buflen).
The parameters r, p, and buflen must satisfy r * p < 2^30 and
buflen <= (2^32 - 1) * 32. The parameter N must be a power of 2
greater than 1. N, r and p must all be positive.
- `password` and `salt` can be both str and bytes. If they are str
instances, they will be encoded with utf-8.
- The result will be a bytes instance
Exceptions raised:
- TypeError on invalid input
- scrypt.error if scrypt failed
|
def hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
"""Compute scrypt(password, salt, N, r, p, buflen).
The parameters r, p, and buflen must satisfy r * p < 2^30 and
buflen <= (2^32 - 1) * 32. The parameter N must be a power of 2
greater than 1. N, r and p must all be positive.
- `password` and `salt` can be both str and bytes. If they are str
instances, they will be encoded with utf-8.
- The result will be a bytes instance
Exceptions raised:
- TypeError on invalid input
- scrypt.error if scrypt failed
"""
outbuf = create_string_buffer(buflen)
password = _ensure_bytes(password)
salt = _ensure_bytes(salt)
if r * p >= (1 << 30) or N <= 1 or (N & (N - 1)) != 0 or p < 1 or r < 1:
raise error(
'hash parameters are wrong (r*p should be < 2**30, '
'and N should be a power of two > 1)'
)
result = _crypto_scrypt(
password, len(password), salt, len(salt), N, r, p, outbuf, buflen, 0
)
if result:
raise error('could not compute hash')
return outbuf.raw
|
(password, salt, N=16384, r=8, p=1, buflen=64)
|
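A short key-derivation example for the hash function above (the salt is illustrative; in practice use a fresh random salt per password and store it alongside the hash):

# Derive a 64-byte key with the default cost parameters (N=2**14, r=8, p=1).
import os
import scrypt

salt = os.urandom(16)
key = scrypt.hash('password', salt)  # buflen defaults to 64
assert len(key) == 64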
54,974 |
pytest_deadfixtures
|
AvailableFixture
|
AvailableFixture(relpath, argname, fixturedef)
|
from pytest_deadfixtures import AvailableFixture
|
(relpath, argname, fixturedef)
|
54,976 |
namedtuple_AvailableFixture
|
__new__
|
Create new instance of AvailableFixture(relpath, argname, fixturedef)
|
from builtins import function
|
(_cls, relpath, argname, fixturedef)
|
54,979 |
collections
|
_replace
|
Return a new AvailableFixture object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
|
(self, /, **kwds)
|
54,980 |
pytest_deadfixtures
|
CachedFixture
|
CachedFixture(fixturedef, relpath, result)
|
from pytest_deadfixtures import CachedFixture
|
(fixturedef, relpath, result)
|
54,982 |
namedtuple_CachedFixture
|
__new__
|
Create new instance of CachedFixture(fixturedef, relpath, result)
|
from builtins import function
|
(_cls, fixturedef, relpath, result)
|
54,985 |
collections
|
_replace
|
Return a new CachedFixture object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
|
(self, /, **kwds)
|
54,987 |
pytest_deadfixtures
|
_show_dead_fixtures
| null |
def _show_dead_fixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, show_dead_fixtures)
|
(config)
|
54,990 |
pytest_deadfixtures
|
get_best_relpath
| null |
def get_best_relpath(func, curdir):
loc = getlocation(func, curdir)
return curdir.bestrelpath(loc)
|
(func, curdir)
|
54,991 |
pytest_deadfixtures
|
get_fixtures
| null |
def get_fixtures(session):
available = []
seen = set()
fm = session._fixturemanager
curdir = py.path.local()
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
for fixturedef in fixturedefs:
loc = getlocation(fixturedef.func, curdir)
if (fixturedef.argname, loc) in seen:
continue
seen.add((fixturedef.argname, loc))
module = fixturedef.func.__module__
if (
not module.startswith("_pytest.")
and not module.startswith("pytest_")
and not ("site-packages" in loc)
and not ("dist-packages" in loc)
and not ("<string>" in loc)
):
available.append(
AvailableFixture(
curdir.bestrelpath(loc), fixturedef.argname, fixturedef
)
)
available.sort(key=lambda a: a.relpath)
return available
|
(session)
|
54,992 |
pytest_deadfixtures
|
get_used_fixturesdefs
| null |
def get_used_fixturesdefs(session):
fixturesdefs = []
for test_function in session.items:
try:
info = test_function._fixtureinfo
except AttributeError:
# doctests items have no _fixtureinfo attribute
continue
if not info.name2fixturedefs:
# this test item does not use any fixtures
continue
for _, fixturedefs in sorted(info.name2fixturedefs.items()):
if fixturedefs is None:
continue
fixturesdefs.append(fixturedefs[-1])
return fixturesdefs
|
(session)
|
54,993 |
_pytest.compat
|
getlocation
| null |
def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str:
function = get_real_func(function)
fn = Path(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None:
try:
relfn = fn.relative_to(curdir)
except ValueError:
pass
else:
return "%s:%d" % (relfn, lineno + 1)
return "%s:%d" % (fn, lineno + 1)
|
(function, curdir: Union[str, os.PathLike[str], NoneType] = None) -> str
|
54,996 |
pytest_deadfixtures
|
pytest_addoption
| null |
def pytest_addoption(parser):
group = parser.getgroup("deadfixtures")
group.addoption(
"--dead-fixtures",
action="store_true",
dest="deadfixtures",
default=False,
help="Show fixtures not being used",
)
group.addoption(
"--dup-fixtures",
action="store_true",
dest="showrepeated",
default=False,
help="Show duplicated fixtures",
)
|
(parser)
|
54,997 |
pytest_deadfixtures
|
pytest_cmdline_main
| null |
def pytest_cmdline_main(config):
if config.option.deadfixtures:
config.option.show_fixture_doc = config.option.verbose
config.option.verbose = -1
if _show_dead_fixtures(config):
return EXIT_CODE_ERROR
return EXIT_CODE_SUCCESS
|
(config)
|
54,998 |
pytest_deadfixtures
|
pytest_fixture_post_finalizer
| null |
def pytest_fixture_post_finalizer(fixturedef):
if getattr(fixturedef, "cached_result", None):
curdir = py.path.local()
loc = getlocation(fixturedef.func, curdir)
cached_fixtures.append(
CachedFixture(
fixturedef, curdir.bestrelpath(loc), fixturedef.cached_result[0]
)
)
|
(fixturedef)
|
54,999 |
pytest_deadfixtures
|
pytest_sessionfinish
| null |
def pytest_sessionfinish(session, exitstatus):
if exitstatus or not session.config.getvalue("showrepeated"):
return exitstatus
tw = _pytest.config.create_terminal_writer(session.config)
duplicated_fixtures = []
for a, b in combinations(cached_fixtures, 2):
if same_fixture(a, b):
duplicated_fixtures.append((a, b))
if duplicated_fixtures:
tw.line(DUPLICATE_FIXTURES_HEADLINE, red=True)
msg = "Fixture name: {}, location: {}"
for a, b in duplicated_fixtures:
tw.line(msg.format(a.fixturedef.argname, a.relpath))
tw.line(msg.format(b.fixturedef.argname, b.relpath))
|
(session, exitstatus)
|
55,000 |
pytest_deadfixtures
|
same_fixture
| null |
def same_fixture(one, two):
def result_same_type(a, b):
return isinstance(a.result, type(b.result))
def same_result(a, b):
if not a.result or not b.result:
return False
if hasattr(a.result, "__dict__") or hasattr(b.result, "__dict__"):
return a.result.__dict__ == b.result.__dict__
return a.result == b.result
def same_loc(a, b):
return a.relpath == b.relpath
return result_same_type(one, two) and same_result(one, two) and not same_loc(one, two)
|
(one, two)
|
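A tiny illustration of the duplicate heuristic above using the CachedFixture tuple documented earlier (the locations and results are made up): two fixtures count as duplicates only when their cached results match but their locations differ.

# Equal results at different locations -> reported as duplicates.
from pytest_deadfixtures import CachedFixture, same_fixture

a = CachedFixture(fixturedef=None, relpath='tests/conftest.py:10', result=[1, 2])
b = CachedFixture(fixturedef=None, relpath='tests/other/conftest.py:3', result=[1, 2])
assert same_fixture(a, b)      # same type and result, different location
assert not same_fixture(a, a)  # same location, so not a duplicate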
55,001 |
pytest_deadfixtures
|
show_dead_fixtures
| null |
def show_dead_fixtures(config, session):
session.perform_collect()
tw = _pytest.config.create_terminal_writer(config)
show_fixture_doc = config.getvalue("show_fixture_doc")
used_fixtures = get_used_fixturesdefs(session)
available_fixtures = get_fixtures(session)
unused_fixtures = [
fixture
for fixture in available_fixtures
if fixture.fixturedef not in used_fixtures
]
tw.line()
if unused_fixtures:
tw.line(UNUSED_FIXTURES_FOUND_HEADLINE, red=True)
write_fixtures(tw, unused_fixtures, show_fixture_doc)
else:
tw.line(UNUSED_FIXTURES_NOT_FOUND_HEADLINE, green=True)
return unused_fixtures
|
(config, session)
|
55,002 |
pytest_deadfixtures
|
write_docstring
| null |
def write_docstring(tw, doc):
INDENT = " "
doc = doc.rstrip()
if "\n" in doc:
firstline, rest = doc.split("\n", 1)
else:
firstline, rest = doc, ""
if firstline.strip():
tw.line(INDENT + firstline.strip())
if rest:
for line in dedent(rest).split("\n"):
tw.write(INDENT + line + "\n")
|
(tw, doc)
|
55,003 |
pytest_deadfixtures
|
write_fixtures
| null |
def write_fixtures(tw, fixtures, write_docs):
for fixture in fixtures:
tplt = "Fixture name: {}, location: {}"
tw.line(tplt.format(fixture.argname, fixture.relpath))
doc = fixture.fixturedef.func.__doc__ or ""
if write_docs and doc:
write_docstring(tw, doc)
|
(tw, fixtures, write_docs)
|
55,004 |
eip712.messages
|
EIP712Message
|
Container for EIP-712 messages with type information, domain separator
parameters, and the message object.
|
class EIP712Message(EIP712Type):
"""
Container for EIP-712 messages with type information, domain separator
parameters, and the message object.
"""
# NOTE: Must override at least one of these fields
_name_: Optional[str] = None
_version_: Optional[str] = None
_chainId_: Optional[int] = None
_verifyingContract_: Optional[str] = None
_salt_: Optional[bytes] = None
def __post_init__(self):
# At least one of the header fields must be in the EIP712 message header
if not any(getattr(self, f"_{field}_") for field in EIP712_DOMAIN_FIELDS):
raise ValidationError(
f"EIP712 Message definition '{repr(self)}' must define "
f"at least one of: _{'_, _'.join(EIP712_DOMAIN_FIELDS)}_"
)
@property
def _domain_(self) -> dict:
"""The EIP-712 domain structure to be used for serialization and hashing."""
domain_type = [
{"name": field, "type": abi_type}
for field, abi_type in EIP712_DOMAIN_FIELDS.items()
if getattr(self, f"_{field}_")
]
return {
"types": {
"EIP712Domain": domain_type,
},
"domain": {field["name"]: getattr(self, f"_{field['name']}_") for field in domain_type},
}
@property
def _body_(self) -> dict:
"""The EIP-712 structured message to be used for serialization and hashing."""
return {
"domain": self._domain_["domain"],
"types": dict(self._types_, **self._domain_["types"]),
"primaryType": repr(self),
"message": {
key: getattr(self, key)
for key in fields(self.__class__)
if not key.startswith("_") or not key.endswith("_")
},
}
def __getitem__(self, key: str) -> Any:
if key in EIP712_BODY_FIELDS:
return self._body_[key]
return super().__getitem__(key)
@property
def signable_message(self) -> SignableMessage:
"""
The current message as a :class:`SignableMessage` named tuple instance.
**NOTE**: The 0x19 prefix is NOT included.
"""
domain = _prepare_data_for_hashing(self._domain_["domain"])
types = _prepare_data_for_hashing(self._types_)
message = _prepare_data_for_hashing(self._body_["message"])
return SignableMessage(
HexBytes(1),
HexBytes(hash_domain(domain)),
HexBytes(hash_eip712_message(types, message)),
)
|
(_name_: Optional[str] = None, _version_: Optional[str] = None, _chainId_: Optional[int] = None, _verifyingContract_: Optional[str] = None, _salt_: Optional[bytes] = None, *args, **kwargs)
|
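A hedged sketch of defining a concrete message with the class above (the type name, domain values, field names and address are invented; annotations are ABI type strings, as the _types_ property expects):

# At least one _..._ domain field must be set, and every field annotation must
# be a valid ABI type string or an EIP712Type subclass.
from eip712.messages import EIP712Message

class Mail(EIP712Message):
    _name_ = "Example Mail"
    _chainId_ = 1

    to: "address"
    amount: "uint256"

msg = Mail(to="0x0000000000000000000000000000000000000001", amount=100)
signable = msg.signable_message  # eth_account SignableMessage, ready to sign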
55,005 |
dataclassy.dataclass
|
__eq__
| null |
def __eq__(self: DataClass, other: DataClass):
return type(self) is type(other) and self.__tuple__ == other.__tuple__
|
(self: Any, other: Any)
|
55,006 |
eip712.messages
|
__getitem__
| null |
def __getitem__(self, key: str) -> Any:
if key in EIP712_BODY_FIELDS:
return self._body_[key]
return super().__getitem__(key)
|
(self, key: str) -> Any
|
55,008 |
dataclassy.dataclass
|
__iter__
| null |
def __iter__(self):
return iter(self.__tuple__)
|
(self)
|
55,009 |
eip712.messages
|
__post_init__
| null |
def __post_init__(self):
# At least one of the header fields must be in the EIP712 message header
if not any(getattr(self, f"_{field}_") for field in EIP712_DOMAIN_FIELDS):
raise ValidationError(
f"EIP712 Message definition '{repr(self)}' must define "
f"at least one of: _{'_, _'.join(EIP712_DOMAIN_FIELDS)}_"
)
|
(self)
|
55,010 |
eip712.messages
|
__repr__
| null |
def __repr__(self) -> str:
return self.__class__.__name__
|
(self) -> str
|
55,011 |
eip712.messages
|
EIP712Type
|
Dataclass for `EIP-712 <https://eips.ethereum.org/EIPS/eip-712>`__ structured data types
(i.e. the contents of an :class:`EIP712Message`).
|
class EIP712Type:
"""
Dataclass for `EIP-712 <https://eips.ethereum.org/EIPS/eip-712>`__ structured data types
(i.e. the contents of an :class:`EIP712Message`).
"""
def __repr__(self) -> str:
return self.__class__.__name__
@property
def _types_(self) -> dict:
"""
Recursively built ``dict`` (name of type ``->`` list of subtypes) of
the underlying fields' types.
"""
types: Dict[str, list] = {repr(self): []}
for field in fields(self.__class__):
value = getattr(self, field)
if isinstance(value, EIP712Type):
types[repr(self)].append({"name": field, "type": repr(value)})
types.update(value._types_)
else:
# TODO: Use proper ABI typing, not strings
field_type = self.__annotations__[field]
if isinstance(field_type, str):
if not is_encodable_type(field_type):
raise ValidationError(f"'{field}: {field_type}' is not a valid ABI type")
elif issubclass(field_type, EIP712Type):
field_type = repr(field_type)
else:
raise ValidationError(
f"'{field}' type annotation must either be a subclass of "
f"`EIP712Type` or valid ABI Type string, not {field_type.__name__}"
)
types[repr(self)].append({"name": field, "type": field_type})
return types
def __getitem__(self, key: str) -> Any:
if (key.startswith("_") and key.endswith("_")) or key not in fields(self.__class__):
raise KeyError("Cannot look up header fields or other attributes this way")
return getattr(self, key)
|
()
|
55,013 |
eip712.messages
|
__getitem__
| null |
def __getitem__(self, key: str) -> Any:
if (key.startswith("_") and key.endswith("_")) or key not in fields(self.__class__):
raise KeyError("Cannot look up header fields or other attributes this way")
return getattr(self, key)
|
(self, key: str) -> Any
|
55,017 |
coreapi.codecs.base
|
BaseCodec
| null |
class BaseCodec(itypes.Object):
media_type = None
# We don't implement stubs, to ensure that we can check which of these
# two operations a codec supports. For example:
# `if hasattr(codec, 'decode'): ...`
# def decode(self, bytestring, **options):
# pass
# def encode(self, document, **options):
# pass
# The following will be removed at some point, most likely in a 2.1 release:
def dump(self, *args, **kwargs):
# Fallback for v1.x interface
return self.encode(*args, **kwargs)
def load(self, *args, **kwargs):
# Fallback for v1.x interface
return self.decode(*args, **kwargs)
@property
def supports(self):
# Fallback for v1.x interface.
if '+' not in self.media_type:
return ['data']
ret = []
if hasattr(self, 'encode'):
ret.append('encoding')
if hasattr(self, 'decode'):
ret.append('decoding')
return ret
def get_media_types(self):
# Fallback, while transitioning from `application/vnd.coreapi+json`
# to simply `application/coreapi+json`.
if hasattr(self, 'media_types'):
return list(self.media_types)
return [self.media_type]
|
()
|
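A minimal codec sketch built on the base class above (the media type and decoding behaviour are invented): supports is derived from which of encode/decode the subclass actually defines.

# Illustrative decode-only codec; callers can feature-test with
# hasattr(codec, 'decode') / hasattr(codec, 'encode').
import json
from coreapi.codecs.base import BaseCodec

class ExampleJSONCodec(BaseCodec):
    media_type = 'application/example+json'  # invented media type

    def decode(self, bytestring, **options):
        return json.loads(bytestring.decode('utf-8'))

codec = ExampleJSONCodec()
codec.supports            # ['decoding'] since only decode is defined
codec.get_media_types()   # ['application/example+json']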
55,018 |
itypes
|
__setattr__
| null |
def __setattr__(self, key, value):
if key.startswith('_'):
return object.__setattr__(self, key, value)
msg = "'%s' object doesn't support property assignment."
raise TypeError(msg % self.__class__.__name__)
|
(self, key, value)
|
55,019 |
coreapi.codecs.base
|
dump
| null |
def dump(self, *args, **kwargs):
# Fallback for v1.x interface
return self.encode(*args, **kwargs)
|
(self, *args, **kwargs)
|
55,020 |
coreapi.codecs.base
|
get_media_types
| null |
def get_media_types(self):
# Fallback, while transitioning from `application/vnd.coreapi+json`
# to simply `application/coreapi+json`.
if hasattr(self, 'media_types'):
return list(self.media_types)
return [self.media_type]
|
(self)
|
55,021 |
coreapi.codecs.base
|
load
| null |
def load(self, *args, **kwargs):
# Fallback for v1.x interface
return self.decode(*args, **kwargs)
|
(self, *args, **kwargs)
|