# ===================== urllib3/exceptions.py =====================
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
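
# Minimal sketch (not part of the module): the ``__reduce__`` overrides above
# exist so pool-bound exceptions survive pickling; the unpicklable pool
# reference is dropped while the URL is kept.
def _pickle_roundtrip_sketch():
    import pickle
    err = MaxRetryError(None, '/index.html', reason=ProtocolError('aborted'))
    restored = pickle.loads(pickle.dumps(err))
    # The restored error keeps its class and URL, but not the pool or reason.
    assert isinstance(restored, MaxRetryError)
    assert restored.url == '/index.html'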

# ===================== urllib3/connectionpool.py =====================
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError, ConnectionError
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
# httplib doesn't like it when we include brackets in ipv6 addresses
self.host = host.strip('[]')
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
    def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
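
# Illustrative sketch only: every pool is a context manager via the
# ``__enter__``/``__exit__`` pair above, so subclasses get deterministic
# cleanup. ``_EchoPool`` is a hypothetical subclass used purely to show the
# contract; it is not part of this module.
def _context_manager_sketch():
    class _EchoPool(ConnectionPool):
        scheme = 'echo'

        def close(self):
            print("closing %s" % self)

    with _EchoPool('example.invalid', port=1234) as pool:
        print("using %s" % pool)
    # close() has run here, whether or not the body raised.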
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        run, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`
    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
            If ``True``, will make sure that the host of the pool's requests
            is consistent with the pool's host, and otherwise raise
            HostChangedError. When ``False``, you can use the pool on an HTTP
            proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
if conn:
conn.close()
conn = None
raise SSLError(e)
except SSLError:
# Treat SSLError separately from BaseSSLError to preserve
# traceback.
if conn:
conn.close()
conn = None
raise
except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
conn.close()
conn = None
if isinstance(e, SocketError) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
retries = retries.increment(method, url, response=response, _pool=self)
retries.sleep()
log.info("Forced retry: %s" % url)
return self.urlopen(method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
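
# A minimal usage sketch (assumes a reachable host; the host name is a
# placeholder, not part of the library): ``urlopen`` is the low-level entry
# point, so the caller passes the raw method/path and can tune retries and
# timeouts per call.
def _urlopen_sketch():
    pool = HTTPConnectionPool('httpbin.org', maxsize=2, block=True)
    try:
        response = pool.urlopen(
            'GET', '/get',
            retries=Retry(total=3, backoff_factor=0.2, redirect=2),
            timeout=Timeout(connect=2.0, read=5.0))
        print(response.status, len(response.data))
    finally:
        pool.close()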
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
**conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html'),
InsecureRequestWarning)
def connection_from_url(url, **kw):
"""
    Given a url, return a :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating a :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
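
# Sketch (not part of the module): extra keyword arguments given to
# ``connection_from_url`` are forwarded to the pool constructor, so per-pool
# defaults can be set in one call. The host below is a placeholder.
def _connection_from_url_sketch():
    pool = connection_from_url(
        'https://example.com/',
        maxsize=4,
        timeout=Timeout(connect=2.0, read=7.0),
        headers={'user-agent': 'sketch/0.1'})
    assert isinstance(pool, HTTPSConnectionPool)
    pool.close()   # no sockets were opened yet; pool creation is lazy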

# ===================== urllib3/util/request.py =====================
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
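
# Tiny sketch of combining the options above (safe to run, no network I/O):
def _make_headers_sketch():
    headers = make_headers(
        keep_alive=True,
        accept_encoding=['gzip', 'deflate'],
        user_agent='sketch/0.1',
        basic_auth='user:password')
    # The list form is joined with commas; basic_auth is base64-encoded.
    assert headers['accept-encoding'] == 'gzip,deflate'
    assert headers['authorization'].startswith('Basic ')
    assert headers['connection'] == 'keep-alive'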

# ===================== urllib3/util/timeout.py =====================
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
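
# Sketch of the connect/read interplay when ``total`` is set (illustrative
# only): the read budget shrinks by however long the connect phase took.
def _timeout_budget_sketch():
    per_pool = Timeout(total=10.0, connect=3.0, read=8.0)
    per_request = per_pool.clone()   # each request needs its own timer state
    per_request.start_connect()
    # ... the connect attempt would happen here ...
    print(per_request.connect_timeout)   # min(connect, total) -> 3.0
    print(per_request.read_timeout)      # min(total - elapsed, read), at most 8.0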

# ===================== urllib3/util/__init__.py =====================
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)

# ===================== urllib3/util/response.py =====================
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
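
# Quick sketch (illustrative): both supported shapes of "file-like" object,
# one exposing ``.closed`` and one exposing ``.fp``.
def _is_fp_closed_sketch():
    import io
    buf = io.BytesIO(b'data')
    assert is_fp_closed(buf) is False
    buf.close()
    assert is_fp_closed(buf) is True

    class _Exhausted(object):
        fp = None   # e.g. an httplib response whose body has been read
    assert is_fp_closed(_Exhausted()) is True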

# ===================== urllib3/util/ssl_.py =====================
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning
SSLContext = None
HAS_SNI = False
create_default_context = None
import errno
import warnings
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5'
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
(3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, location):
self.ca_certs = location
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None):
warnings.warn(
'A true SSLContext object is not available. This prevents '
'urllib3 from configuring SSL appropriately and may cause '
'certain SSL connections to fail. For more information, see '
'https://urllib3.readthedocs.org/en/latest/security.html'
'#insecureplatformwarning.',
InsecurePlatformWarning
)
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1,
32: sha256,
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, odd = divmod(len(fingerprint), 2)
if odd or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
    # We need encode() here for py32; works on py2 and py3.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
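
# Offline sketch: the fingerprint is just the hex digest of the DER
# certificate bytes, so a matching digest passes and anything else raises
# ``SSLError``. The byte string below stands in for a real certificate.
def _assert_fingerprint_sketch():
    fake_cert = b'not-a-real-der-certificate'
    good = sha256(fake_cert).hexdigest()
    assert_fingerprint(fake_cert, good)    # silent on success
    assert_fingerprint(fake_cert, ':'.join(
        good[i:i + 2] for i in range(0, len(good), 2)))   # colons are allowed
    try:
        assert_fingerprint(fake_cert, '00' * 32)
    except SSLError:
        pass                               # mismatch is rejected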
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Resolves the argument to an SSL protocol version constant, in the same
    way as :func:`resolve_cert_reqs`. Defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None):
"""
All arguments except for server_hostname and ssl_context have the same
meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
"""
context = ssl_context
if context is None:
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
if ca_certs:
try:
context.load_verify_locations(ca_certs)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
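
# End-to-end sketch (assumes network access, an available ``ssl`` module and
# a Debian-style CA bundle path; host and path are placeholders):
def _ssl_wrap_socket_sketch():
    import socket as _socket
    raw = _socket.create_connection(('example.com', 443))
    tls = ssl_wrap_socket(raw,
                          ca_certs='/etc/ssl/certs/ca-certificates.crt',
                          server_hostname='example.com')
    try:
        tls.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
        print(tls.recv(200))
    finally:
        tls.close()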

# ===================== urllib3/util/retry.py =====================
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.MAX_BACKOFF`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
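
# Sketch of how the counters evolve (offline, uses only the classes above):
# each ``increment`` returns a *new* Retry with one more observed error, and
# the backoff grows as ``backoff_factor * 2 ** (observed_errors - 1)``.
def _retry_increment_sketch():
    retries = Retry(total=3, backoff_factor=0.5)
    err = ConnectTimeoutError('connect timed out')
    for _ in range(3):
        retries = retries.increment(method='GET', url='/', error=err)
        print(retries.total, retries.get_backoff_time())
    # prints: "2 0", "1 1.0", "0 2.0"; a fourth failure would raise
    # MaxRetryError because ``total`` drops below zero.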

# ===================== urllib3/util/url.py =====================
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
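# --- Illustrative usage sketch (editor's addition) ---
# Demonstrates parse_url() and the Url convenience properties defined above.
# Because this module uses a relative import, the guard below mainly keeps
# the example inert when the module is imported as part of the package.
if __name__ == '__main__':
    u = parse_url('http://user:[email protected]:8080/path?q=1#frag')
    assert (u.scheme, u.auth, u.host, u.port) == ('http', 'user:pw', 'example.com', 8080)
    assert u.request_uri == '/path?q=1'
    assert u.netloc == 'example.com:8080'
    assert get_host('example.com:80') == ('http', 'example.com', 80)
    print(u.url)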
|
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. The only addition to its signature is `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
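# --- Illustrative usage sketch (editor's addition) ---
# Opens a plain TCP connection with Nagle's algorithm disabled via the
# ``socket_options`` hook added above; the host and port are placeholders.
if __name__ == '__main__':
    opts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    s = create_connection(('example.com', 80), timeout=5.0, socket_options=opts)
    try:
        # Non-zero on most platforms once TCP_NODELAY has been set.
        print(s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
    finally:
        s.close()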
|
'''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many subjectAltNames a certificate may have;
# however, this needs some limit, so we'll set an arbitrarily high one.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
_, wlist, _ = select.select([], [self.socket], [],
self.socket.gettimeout())
if not wlist:
raise timeout()
continue
def sendall(self, data):
while len(data):
sent = self._send_until_done(data)
data = data[sent:]
def close(self):
if self._makefile_refs < 1:
return self.connection.shutdown()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
# Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
rd, _, _ = select.select([sock], [], [], sock.gettimeout())
if not rd:
raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
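# --- Illustrative usage sketch (editor's addition) ---
# All values below are placeholders; a real NTLM-protected host and valid
# credentials are required for the handshake in _new_conn() to succeed.
if __name__ == '__main__':
    pool = NTLMConnectionPool('DOMAIN\\user', 'secret', authurl='/',
                              host='ntlm.example.com', port=443)
    resp = pool.urlopen('GET', '/')
    print(resp.status)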
|
from __future__ import absolute_import
from . import ssl_match_hostname
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
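# --- Illustrative usage sketch (editor's addition, Python 2 only) ---
# Exercises the ordering guarantees of the backport defined above.
if __name__ == '__main__':
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert list(od) == ['a', 'b', 'c']            # insertion order is preserved
    od['b'] = 20                                  # updating a key keeps its position
    assert od.popitem(last=False) == ('a', 1)     # FIFO pop from the front
    assert od.popitem() == ('c', 3)               # LIFO pop from the end
    assert od == OrderedDict([('b', 20)])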
|
"""Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
|
try:
# Python 3.2+
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
# -*- coding: utf-8 -*-
#
# Cabal Users Guide documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
import sys
import os
import sphinx_rtd_theme
# Support for :base-ref:, etc.
sys.path.insert(0, os.path.abspath('.'))
import cabaldomain
version = "1.25"
extensions = ['sphinx.ext.extlinks']
templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
master_doc = 'index'
# extlinks -- see http://www.sphinx-doc.org/en/stable/ext/extlinks.html
extlinks = {
'issue': ('https://github.com/haskell/cabal/issues/%s', '#'),
'ghc-wiki': ('http://ghc.haskell.org/trac/ghc/wiki/%s', ''),
'ghc-ticket': ('http://ghc.haskell.org/trac/ghc/ticket/%s', 'GHC #'),
'hackage-pkg': ('http://hackage.haskell.org/package/%s', ''),
}
# General information about the project.
project = u'Cabal'
copyright = u'2003-2017, Cabal Team'
# N.B. version is set above
release = version # The full version, including alpha/beta/rc tags.
# Syntax highlighting
highlight_language = 'cabal'
#pygments_style = 'tango'
primary_domain = 'cabal'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build', "*.gen.rst"]
# -- Options for HTML output ---------------------------------------------
# on_rtd is whether we are on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Cabal <release> User's Guide"
html_short_title = "Cabal %s User's Guide" % release
html_logo = 'images/Cabal-dark.png'
html_static_path = ['images']
# Convert quotes and dashes to typographically correct entities
html_use_smartypants = True
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CabalUsersGuide'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
'inputenc': '',
'utf8extra': '',
'preamble': '''
\usepackage{fontspec}
\usepackage{makeidx}
\setsansfont{DejaVu Sans}
\setromanfont{DejaVu Serif}
\setmonofont{DejaVu Sans Mono}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'users_guide.tex', u'Cabal Users Guide Documentation',
u'Cabal Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'images/logo.pdf'
# If true, show page references after internal links.
latex_show_pagerefs = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('cabal', 'cabal', 'The Haskell Cabal', 'The Cabal Team', 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CabalUsersGuide', u'Cabal Users Guide',
u'Cabal Team', 'CabalUsersGuide', 'The Haskell Cabal.',
'Compilers'),
]
from sphinx import addnodes
from docutils import nodes
def parse_ghci_cmd(env, sig, signode):
name = sig.split(';')[0]
sig = sig.replace(';', '')
signode += addnodes.desc_name(name, sig)
return name
def parse_flag(env, sig, signode):
import re
names = []
for i, flag in enumerate(sig.split(',')):
flag = flag.strip()
sep = '='
parts = flag.split('=')
if len(parts) == 1:
sep=' '
parts = flag.split()
if len(parts) == 0: continue
name = parts[0]
names.append(name)
sig = sep + ' '.join(parts[1:])
sig = re.sub(ur'<([-a-zA-Z ]+)>', ur'⟨\1⟩', sig)
if i > 0:
signode += addnodes.desc_name(', ', ', ')
signode += addnodes.desc_name(name, name)
if len(sig) > 0:
signode += addnodes.desc_addname(sig, sig)
return names[0]
def setup(app):
from sphinx.util.docfields import Field, TypedField
increase_python_stack()
# the :ghci-cmd: directive used in ghci.rst
app.add_object_type('ghci-cmd', 'ghci-cmd',
parse_node=parse_ghci_cmd,
objname='GHCi command',
indextemplate='pair: %s; GHCi command')
app.add_object_type('ghc-flag', 'ghc-flag',
objname='GHC command-line option',
parse_node=parse_flag,
indextemplate='pair: %s; GHC option',
doc_field_types=[
Field('since', label='Introduced in GHC version', names=['since']),
Field('default', label='Default value', names=['default']),
Field('static')
])
app.add_object_type('rts-flag', 'rts-flag',
objname='runtime system command-line option',
parse_node=parse_flag,
indextemplate='pair: %s; RTS option',
doc_field_types=[
Field('since', label='Introduced in GHC version', names=['since']),
])
cabaldomain.setup(app)
def increase_python_stack():
# Workaround sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Default python allows recursion depth of 1000 calls.
sys.setrecursionlimit(10000)
|
# -*- coding: utf-8 -*-
'''
Sphinx domain for documenting all things cabal
The main reason to use this instead of adding object types to the std domain
is the ability to generate a nice 'Reference' page and also to provide some
meta data for the objects described by the directives defined here.
Most directives accept at least the following optional arguments
`:since: 1.23`
version of Cabal in which feature was added.
`:deprecated: 1.23`
`:deprecated:`
Feature was deprecated, optionally noting since which version.
`:synopsis: Short desc`
Text used as short description on reference page.
Added directives
.. rst:directive:: .. cabal::cfg-section
Describes a package.cabal section, such as library or executable.
All following `pkg-field` directives will add the section name
to their field names to disambiguate duplicates.
You can reset the section disambiguation with `.. pkg-section:: None`.
.. rst::role:: pkg-section
References section added by `.. pkg-section`
.. rst:directive:: .. cabal::pkg-field
Describes a package.cabal field.
Can have a :default: field. On the reference page it will be grouped under
pkg-section if set, and under the parent header otherwise.
.. rst::role:: pkg-field
References field added by `.. pkg-field`, fields can be disambiguated
with section name `:pkg-field:`section:field`.
.. rst:directive:: .. cabal:cfg-section::
Same as `.. cabal::pkg-section` but does not produce any visible output;
currently unused.
.. rst:directive:: .. cabal:cfg-field::
Describes a project.cabal field.
Can have multiple arguments; if an argument starts with '-' it is treated
as a cabal flag.
Can have a :default: field. On the reference page it will be grouped under
pkg-section if set, and under the parent header otherwise.
.. rst::role:: cfg-field
References field added by `.. cfg-field`.
.. rst::role:: cfg-flag
References flag added by `.. cfg-field`.
All roles can be supplied with title as in standard sphinx references::
:pkg-field:`Build dependencies<build-depends>`
To be done:
- Directives for describing executables, their subcommands and flags.
These should act in a way similar to the `.. std::option` directive, but with
extra meta data, and should also end up in the reference.
At least the setup and `new-build` subcommands should get special directives
- Improve rendering of flags in `.. cfg-field::` directive. It should be
possible without copy-pasting code from sphinx.directives.ObjectDescription
by examining result of ObjectDescription.run and inserting flags into
desc_content node.
Alternatively, a `.. flags::` sub-directive can be added which will be aware
of the parent `.. cfg-field` directive.
- With the same ObjectDescription.run trick as above, render since and deprecated
info the same way as standard object fields, and use fancy rendering only on
the references page.
- Add a `since_version` config value to the sphinx env and use it to decide if
version meta info should be rendered on the reference page, and thus reduce
some clutter.
Can also be used to generate a 'What's new' reference page.
'''
import re
from docutils import nodes
from docutils.parsers.rst import Directive, directives, roles
import pygments.lexer as lexer
import pygments.token as token
from distutils.version import StrictVersion
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import ObjType, Domain, Index
from sphinx.domains.std import StandardDomain
from sphinx.locale import l_, _
from sphinx.roles import XRefRole
from sphinx.util.docfields import Field, DocFieldTransformer
from sphinx.util.nodes import make_refnode
def parse_deprecated(txt):
if txt is None:
return True
try:
return StrictVersion(txt)
except ValueError:
return True
def parse_flag(env, sig, signode):
import re
names = []
for i, flag in enumerate(sig.split(',')):
flag = flag.strip()
sep = '='
parts = flag.split('=')
if len(parts) == 1:
sep=' '
parts = flag.split()
if len(parts) == 0: continue
name = parts[0]
names.append(name)
sig = sep + ' '.join(parts[1:])
sig = re.sub(ur'<([-a-zA-Z ]+)>', ur'⟨\1⟩', sig)
if i > 0:
signode += addnodes.desc_name(', ', ', ')
signode += addnodes.desc_name(name, name)
if len(sig) > 0:
signode += addnodes.desc_addname(sig, sig)
return names[0]
class Meta(object):
'''
Meta data associated with object
'''
def __init__(self,
since=None,
deprecated=None,
synopsis=None,
title=None,
section=None,
index=0):
self.since = since
self.deprecated = deprecated
self.synopsis = synopsis
self.title = title
self.section = section
self.index = index
def find_section_title(parent):
'''
Find current section id and title if possible
'''
while parent is not None:
if isinstance(parent, nodes.section):
break
parent = parent.parent
if parent is None:
return None
section_id = parent['ids'][0]
section_name = parent['names'][0]
for kid in parent:
if isinstance(kid, nodes.title):
return kid.astext(), section_id
return section_name, section_id
class CabalSection(Directive):
"""
Marks section to which following objects belong, used to disambiguate
references to fields and flags which can have similar names
Does not generate any output besides anchor.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'name': lambda x: x,
'deprecated': parse_deprecated,
'since' : StrictVersion,
'synopsis' : lambda x:x,
}
section_key = 'cabal:pkg-section'
target_prefix = 'pkg-section-'
indextemplate = ''
indextype = 'pair'
def get_index_entry(self, name):
return self.indextemplate % name
def run(self):
env = self.state.document.settings.env
section = self.arguments[0].strip()
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
if section == 'None':
env.ref_context.pop(self.section_key, None)
return []
env.ref_context[self.section_key] = section
targetname = self.target_prefix + section
node = nodes.target('', '', ids=[targetname])
self.state.document.note_explicit_target(node)
indexentry = self.get_index_entry(section)
inode = addnodes.index(
entries = [
(self.indextype, indexentry, targetname, '', None)])
# find title of parent section node
title = find_section_title(self.state.parent)
data_key = CabalDomain.types[self.objtype]
# find how many sections in this document were added
num = env.domaindata['cabal']['index-num'].get(env.docname, 0)
env.domaindata['cabal']['index-num'][env.docname] = num + 1
meta = Meta(since=self.options.get('since'),
deprecated=self.options.get('deprecated'),
synopsis=self.options.get('synopsis'),
index = num,
title = title)
store = env.domaindata['cabal'][data_key]
if not section in store:
store[section] = env.docname, targetname, meta
return [inode, node]
class CabalObject(ObjectDescription):
option_spec = {
'noindex' : directives.flag,
'deprecated': parse_deprecated,
'since' : StrictVersion,
'synopsis' : lambda x:x
}
# node attribute marking which section field belongs to
section_key = ''
# template for index, it is passed a field name as argument
# used by default deg_index_entry method
indextemplate = ''
def get_meta(self):
'''
Collect meta data for fields
Reads optional arguments passed to directive and also
tries to find current section title and adds it as section
'''
env = self.state.document.settings.env
# find title of current section, will group references page by it
num = env.domaindata['cabal']['index-num'].get(env.docname, 0)
env.domaindata['cabal']['index-num'][env.docname] = num + 1
title = find_section_title(self.state.parent)
return Meta(since=self.options.get('since'),
deprecated=self.options.get('deprecated'),
title=title,
index = num,
synopsis=self.options.get('synopsis'))
def get_env_key(self, env, name):
'''
Should return a key used to reference this field and the name of the
domain data store in which to keep this object.
'''
section = self.env.ref_context.get(self.section_key)
store = CabalDomain.types[self.objtype]
return (section, name), store
def get_index_entry(self, env, name):
'''
Should return the index entry and anchor.
By default uses the indextemplate attribute to generate the index entry and
builds the anchor by joining the directive name, section and field name.
'''
section = self.env.ref_context.get(self.section_key)
if section is not None:
parts = (self.objtype, section, name)
indexentry = self.indextemplate % (section + ':' + name)
else:
parts = (self.objtype, name)
indexentry = self.indextemplate % name
targetname = '-'.join(parts)
return indexentry, targetname
def add_target_and_index(self, name, sig, signode):
'''
As in sphinx.directives.ObjectDescription.
By default adds a 'pair' index entry as returned by get_index_entry and
stores object data into the domain data store under the key returned by get_env_key.
'''
env = self.state.document.settings.env
indexentry, targetname = self.get_index_entry(env, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
inode = addnodes.index(
entries=[('pair', indexentry, targetname, '', None)])
signode.insert(0, inode)
key, store = self.get_env_key(env, name)
env.domaindata['cabal'][store][key] = env.docname, targetname, self.cabal_meta
def run(self):
self.cabal_meta = self.get_meta()
result = super(CabalObject, self).run()
if self.cabal_meta.since is not None \
or self.cabal_meta.deprecated is not None:
#find content part of description
for item in result:
if isinstance(item, addnodes.desc):
desc = item
break
else:
return result
for item in desc:
if isinstance(item, addnodes.desc_content):
contents = item
break
else:
return result
# find existing field list and add to it
# or create a new one
for item in contents:
if isinstance(item, nodes.field_list):
field_list = item
break
else:
field_list = nodes.field_list('')
contents.insert(0, field_list)
if self.cabal_meta.since is not None:
#docutils horror
field = nodes.field('')
field_name = nodes.field_name('Since', 'Since')
since = 'Cabal ' + str(self.cabal_meta.since)
field_body = nodes.field_body(since, nodes.paragraph(since, since))
field += field_name
field += field_body
field_list.insert(0, field)
if self.cabal_meta.deprecated is not None:
field = nodes.field('')
field_name = nodes.field_name('Deprecated', 'Deprecated')
if isinstance(self.cabal_meta.deprecated, StrictVersion):
since = 'Cabal ' + str(self.cabal_meta.deprecated)
else:
since = ''
field_body = nodes.field_body(since, nodes.paragraph(since, since))
field += field_name
field += field_body
field_list.insert(0, field)
return result
class CabalPackageSection(CabalObject):
"""
Cabal section in package.cabal file
"""
section_key = 'cabal:pkg-section'
indextemplate = '%s; package.cabal section'
def handle_signature(self, sig, signode):
'''
As in sphinx.directives.ObjectDescription
By default makes an object description from the name, adding
either deprecated or since as an annotation.
'''
env = self.state.document.settings.env
sig = sig.strip()
parts = sig.split(' ',1)
name = parts[0]
signode += addnodes.desc_name(name, name)
signode += addnodes.desc_addname(' ', ' ')
if len(parts) > 1:
rest = parts[1].strip()
signode += addnodes.desc_annotation(rest, rest)
return name
def get_env_key(self, env, name):
store = CabalDomain.types[self.objtype]
return name, store
def run(self):
env = self.state.document.settings.env
section = self.arguments[0].strip().split(' ',1)[0]
if section == 'None':
env.ref_context.pop('cabal:pkg-section', None)
return []
env.ref_context['cabal:pkg-section'] = section
return super(CabalPackageSection, self).run()
class CabalField(CabalObject):
'''
Base for fields in *.cabal files
'''
option_spec = {
'noindex' : directives.flag,
'deprecated': parse_deprecated,
'since' : StrictVersion,
'synopsis' : lambda x:x
}
doc_field_types = [
Field('default', label='Default value', names=['default'], has_arg=False)
]
def handle_signature(self, sig, signode):
'''
As in sphinx.directives.ObjectDescription
By default makes an object description from the name, adding
either deprecated or since as an annotation.
'''
env = self.state.document.settings.env
sig = sig.strip()
parts = sig.split(':',1)
name = parts[0]
signode += addnodes.desc_name(name, name)
signode += addnodes.desc_addname(': ', ': ')
if len(parts) > 1:
rest = parts[1].strip()
signode += addnodes.desc_annotation(rest, rest)
return name
class CabalPackageField(CabalField):
'''
Describes section in package.cabal file
'''
section_key = 'cabal:pkg-section'
indextemplate = '%s; package.cabal field'
class CabalFieldXRef(XRefRole):
'''
Cross ref node for all kinds of fields
Gets the section_key entry from the context and stores it on the node, so it
can later be used by CabalDomain.resolve_xref to find the target for a
reference to this field.
'''
section_key = 'cabal:pkg-section'
def process_link(self, env, refnode, has_explicit_title, title, target):
parts = target.split(':',1)
if len(parts) == 2:
section, target = parts
section = section.strip()
target = target.strip()
refnode[self.section_key] = section
else:
refnode[self.section_key] = env.ref_context.get(self.section_key)
return title, target
#
# Directives for config files.
#
class CabalPackageFieldXRef(CabalFieldXRef):
'''
Role referencing package.cabal fields
'''
section_key = 'cabal:pkg-section'
class CabalConfigSection(CabalSection):
"""
Marks a section in the project.cabal file
"""
indextemplate = '%s; project.cabal section'
section_key = 'cabal:cfg-section'
target_prefix = 'cfg-section-'
class ConfigField(CabalField):
section_key = 'cabal:cfg-section'
indextemplate = '%s ; cabal project option'
def handle_signature(self, sig, signode):
sig = sig.strip()
if sig.startswith('-'):
name = parse_flag(self, sig, signode)
else:
name = super(ConfigField, self).handle_signature(sig, signode)
return name
def get_index_entry(self, env, name):
if name.startswith('-'):
section = self.env.ref_context.get(self.section_key)
if section is not None:
parts = ('cfg-flag', section, name)
indexname = section + ':' + name
else:
parts = ('cfg-flag', name)
indexname = name
indexentry = name + '; cabal project option'
targetname = '-'.join(parts)
return indexentry, targetname
else:
return super(ConfigField,self).get_index_entry(env, name)
def get_env_key(self, env, name):
section = self.env.ref_context.get(self.section_key)
if name.startswith('-'):
return (section, name), 'cfg-flags'
return (section, name), 'cfg-fields'
class CabalConfigFieldXRef(CabalFieldXRef):
section_key = 'cabal:cfg-section'
#
# Cabal domain
#
class ConfigFieldIndex(Index):
name = 'projectindex'
localname = "Cabal reference"
shortname = "Reference"
class Entry(object):
def __init__(self, typ, name, doc, anchor, meta):
self.type = typ
self.name = name
self.doc = doc
self.anchor = anchor
self.meta = meta
def _gather_data(self, obj_types):
'''
Gather objects and return [(title, [Entry])]
'''
def massage(typ, datum):
name, (doc, anchor, meta) = datum
return self.Entry(typ, name, doc, anchor, meta)
fields = []
for typ in obj_types:
store = CabalDomain.types[typ]
fields += [massage(typ, x)
for x in self.domain.data[store].items()]
fields.sort(key=lambda x: (x.doc, x.meta.index))
if len(fields) == 0:
return []
result = []
current = []
current_title = fields[0].meta.title
for field in fields:
if field.meta.title != current_title:
result.append((current_title, current))
current = []
current_title = field.meta.title
current.append(field)
result.append((current_title, current))
return result
def generate(self, docnames=None):
'''
        Try to group entries so that an entry that has a section is put into
        the same group as that section; otherwise group it under the same
        `title`.
        Try to keep entries in the order they were defined:
        sort by (document, index), group on (document, doc_section).
TODO: Check how to extract section numbers from (document,doc_section)
and add it as annotation to titles
'''
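        # Illustrative shape of the return value expected by Sphinx (an
        # assumption written out for clarity, not real data):
        #   ([('package.cabal fields',
        #      [(name, 0 or 2, docname, anchor, extra, '', synopsis), ...]),
        #     ...],
        #    False)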
# (title, section store, fields store)
entries = [('project.cabal fields', 'cfg-section', 'cfg-field'),
('cabal project flags', 'cfg-section', 'cfg-flag'),
('package.cabal fields', 'pkg-section', 'pkg-field')]
result = []
for label, section_key, key in entries:
data = self._gather_data([section_key, key])
references = []
for section, entries in data:
if section is None:
elem_type = 0 # Normal entry
else:
elem_type = 2 # sub_entry
assert len(entries) != 0
docname = entries[0].doc
if section is not None:
section_title, section_anchor = section
references.append(
(section_title, 1, docname, section_anchor, '', '', ''))
for entry in entries:
                    # TODO: handle the case where entry.name is a (section, name) tuple
if isinstance(entry.name, tuple):
name = entry.name[1]
else:
name = entry.name
meta = entry.meta
extra = render_meta(meta)
descr = meta.synopsis if meta.synopsis is not None else ''
field = (name, elem_type, docname,
entry.anchor, extra, '', descr)
references.append(field)
result.append((label, references))
return result, False
def make_data_keys(typ, target, node):
'''
Returns a list of keys to search for targets of this type
in domain data.
Used for resolving references
'''
if typ == 'pkg-field':
section = node.get('cabal:pkg-section')
return [(section, target),
(None, target)]
elif typ in ('cfg-field', 'cfg-flag'):
section = node.get('cabal:cfg-section')
return [(section, target), (None, target)]
else:
return [target]
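# For example (illustrative), for a reference created inside a ``library``
# section:
#   make_data_keys('pkg-field', 'build-depends', node)
#   == [('library', 'build-depends'), (None, 'build-depends')]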
def render_deprecated(deprecated):
if isinstance(deprecated, StrictVersion):
return 'deprecated since: '+str(deprecated)
else:
return 'deprecated'
def render_meta(meta):
'''
Render meta as short text
Will render either deprecated or since info
'''
if meta.deprecated is not None:
return render_deprecated(meta.deprecated)
elif meta.since is not None:
return 'since version: ' + str(meta.since)
else:
return ''
def render_meta_title(meta):
'''
Render meta as suitable to use in titles
'''
rendered = render_meta(meta)
if rendered != '':
return '(' + rendered + ')'
return ''
def make_title(typ, key, meta):
'''
Render title of an object (section, field or flag)
'''
if typ == 'pkg-section':
return "package.cabal " + key + " section " + render_meta_title(meta)
elif typ == 'pkg-field':
section, name = key
if section is not None:
base = "package.cabal " + section + " section " + name + ": field"
else:
base = "package.cabal " + name + " field"
return base + render_meta_title(meta)
elif typ == 'cfg-section':
return "project.cabal " + key + " section " + render_meta_title(meta)
elif typ == 'cfg-field':
section, name = key
return "project.cabal " + name + " field " + render_meta_title(meta)
elif typ == 'cfg-flag':
section, name = key
return "cabal flag " + name + " " + render_meta_title(meta)
else:
raise ValueError("Unknown type: " + typ)
def make_full_name(typ, key, meta):
'''
Return an anchor name for object type
'''
if typ == 'pkg-section':
return 'pkg-section-' + key
elif typ == 'pkg-field':
section, name = key
if section is not None:
return '-'.join(('pkg-field',section, name))
else:
return 'pkg-field-' + name
elif typ == 'cfg-field':
return 'cfg-field-' + key
else:
raise ValueError('Unknown object type: ' + typ)
class CabalDomain(Domain):
'''
Sphinx domain for cabal
needs Domain.merge_doc for parallel building, just union all dicts
'''
name = 'cabal'
label = 'Cabal'
object_types = {
'pkg-section': ObjType(l_('pkg-section'), 'pkg-section'),
'pkg-field' : ObjType(l_('pkg-field') , 'pkg-field' ),
'cfg-section': ObjType(l_('cfg-section'), 'cfg-section'),
'cfg-field' : ObjType(l_('cfg-field') , 'cfg-field' ),
}
directives = {
'pkg-section': CabalPackageSection,
'pkg-field' : CabalPackageField,
'cfg-section': CabalConfigSection,
'cfg-field' : ConfigField,
}
roles = {
'pkg-section': XRefRole(warn_dangling=True),
'pkg-field' : CabalPackageFieldXRef(warn_dangling=True),
'cfg-section': XRefRole(warn_dangling=True),
'cfg-field' : CabalConfigFieldXRef(warn_dangling=True),
'cfg-flag' : CabalConfigFieldXRef(warn_dangling=True),
}
initial_data = {
'pkg-sections': {},
'pkg-fields' : {},
'cfg-sections': {},
'index-num' : {}, #per document number of objects
# used to order references page
'cfg-fields' : {},
'cfg-flags' : {},
}
indices = [
ConfigFieldIndex
]
types = {
'pkg-section': 'pkg-sections',
'pkg-field' : 'pkg-fields',
'cfg-section': 'cfg-sections',
'cfg-field' : 'cfg-fields',
'cfg-flag' : 'cfg-flags',
}
def clear_doc(self, docname):
for k in ['pkg-sections', 'pkg-fields', 'cfg-sections',
'cfg-fields', 'cfg-flags']:
            for name, (fn, _, _) in list(self.data[k].items()):
                if fn == docname:
                    del self.data[k][name]
try:
del self.data['index-num'][docname]
except KeyError:
pass
def resolve_xref(self, env, fromdocname, builder, type, target, node, contnode):
objtypes = self.objtypes_for_role(type)
for typ, key in ((typ, key)
for typ in objtypes
for key in make_data_keys(typ, target, node)):
try:
data = env.domaindata['cabal'][self.types[typ]][key]
except KeyError:
continue
doc, ref, meta = data
title = make_title(typ, key, meta)
return make_refnode(builder, fromdocname, doc, ref, contnode, title)
def get_objects(self):
'''
Used for search functionality
'''
for typ in ['pkg-section', 'pkg-field',
'cfg-section', 'cfg-field', 'cfg-flag']:
key = self.types[typ]
for name, (fn, target, meta) in self.data[key].items():
title = make_title(typ, name, meta)
yield title, title, typ, fn, target, 0
class CabalLexer(lexer.RegexLexer):
'''
Basic cabal lexer, does not try to be smart
'''
name = 'Cabal'
aliases = ['cabal']
filenames = ['.cabal']
flags = re.MULTILINE
tokens = {
'root' : [
(r'^(\s*)(--.*)$', lexer.bygroups(token.Whitespace, token.Comment.Single)),
# key: value
(r'^(\s*)([\w\-_]+)(:)',
lexer.bygroups(token.Whitespace, token.Keyword, token.Punctuation)),
(r'^([\w\-_]+)', token.Keyword), # library, executable, flag etc.
(r'[^\S\n]+', token.Text),
(r'&&|\|\||==|<=|\^>=|>=|<|>', token.Operator),
(r',|:|{|}', token.Punctuation),
(r'.', token.Text)
],
}
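    # Example input this lexer is expected to handle (illustrative only):
    #   -- build section of an executable
    #   executable my-exe
    #     build-depends: base >= 4.9 && < 5
    #     ghc-options:   -Wall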
def setup(app):
app.add_domain(CabalDomain)
app.add_lexer('cabal', CabalLexer())
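# Usage sketch (assumption, not part of this file): in a Sphinx ``conf.py``,
# add the module containing this code to ``extensions`` so that setup() above
# registers the ``cabal`` domain and lexer, e.g.
#   extensions = ['cabaldomain']  # module name is hypothetical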
|
#! /usr/bin/env python
#
# Copyright (C) 2010 Joel Rosdahl
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from optparse import OptionParser
from os import access, environ, mkdir, getpid, X_OK
from os.path import (
abspath, basename, exists, isabs, isfile, join as joinpath, realpath,
splitext)
from shutil import rmtree
from subprocess import call
from time import time
import sys
USAGE = """%prog [options] <compiler> [compiler options] <source code file>"""
DESCRIPTION = """\
This program compiles a C/C++ file with/without ccache a number of times to get
some idea of ccache speedup and overhead in the preprocessor and direct modes.
The arguments to the program should be the compiler, optionally followed by
compiler options, and finally the source file to compile. The compiler options
must not contain -c or -o as these options will be added later. Example:
./perf.py gcc -g -O2 -Idir file.c
"""
DEFAULT_CCACHE = "./ccache"
DEFAULT_DIRECTORY = "."
DEFAULT_HIT_FACTOR = 1
DEFAULT_TIMES = 30
PHASES = [
"without ccache",
"with ccache, preprocessor mode, cache miss",
"with ccache, preprocessor mode, cache hit",
"with ccache, direct mode, cache miss",
"with ccache, direct mode, cache hit"]
verbose = False
def progress(msg):
if verbose:
sys.stderr.write(msg)
sys.stderr.flush()
def recreate_dir(x):
if exists(x):
rmtree(x)
mkdir(x)
def test(tmp_dir, options, compiler_args, source_file):
src_dir = "%s/src" % tmp_dir
obj_dir = "%s/obj" % tmp_dir
ccache_dir = "%s/ccache" % tmp_dir
mkdir(src_dir)
mkdir(obj_dir)
compiler_args += ["-c", "-o"]
extension = splitext(source_file)[1]
hit_factor = options.hit_factor
times = options.times
progress("Creating source code\n")
for i in range(times):
fp = open("%s/%d%s" % (src_dir, i, extension), "w")
fp.write(open(source_file).read())
fp.write("\nint ccache_perf_test_%d;\n" % i)
fp.close()
environment = {"CCACHE_DIR": ccache_dir, "PATH": environ["PATH"]}
environment["CCACHE_COMPILERCHECK"] = options.compilercheck
if options.compression:
environment["CCACHE_COMPRESS"] = "1"
if options.hardlink:
environment["CCACHE_HARDLINK"] = "1"
if options.nostats:
environment["CCACHE_NOSTATS"] = "1"
result = [None] * len(PHASES)
def run(i, use_ccache, use_direct):
obj = "%s/%d.o" % (obj_dir, i)
src = "%s/%d%s" % (src_dir, i, extension)
if use_ccache:
args = [options.ccache]
else:
args = []
args += compiler_args + [obj, src]
env = environment.copy()
if not use_direct:
env["CCACHE_NODIRECT"] = "1"
if call(args, env=env) != 0:
sys.stderr.write(
'Error running "%s"; please correct\n' % " ".join(args))
sys.exit(1)
# Warm up the disk cache.
recreate_dir(ccache_dir)
recreate_dir(obj_dir)
run(0, True, True)
###########################################################################
# Without ccache
recreate_dir(ccache_dir)
recreate_dir(obj_dir)
progress("Compiling %s\n" % PHASES[0])
t0 = time()
for i in range(times):
run(i, False, False)
progress(".")
result[0] = time() - t0
progress("\n")
###########################################################################
# Preprocessor mode
recreate_dir(ccache_dir)
recreate_dir(obj_dir)
progress("Compiling %s\n" % PHASES[1])
t0 = time()
for i in range(times):
run(i, True, False)
progress(".")
result[1] = time() - t0
progress("\n")
recreate_dir(obj_dir)
progress("Compiling %s\n" % PHASES[2])
t0 = time()
for j in range(hit_factor):
for i in range(times):
run(i, True, False)
progress(".")
result[2] = (time() - t0) / hit_factor
progress("\n")
###########################################################################
# Direct mode
recreate_dir(ccache_dir)
recreate_dir(obj_dir)
progress("Compiling %s\n" % PHASES[3])
t0 = time()
for i in range(times):
run(i, True, True)
progress(".")
result[3] = time() - t0
progress("\n")
recreate_dir(obj_dir)
progress("Compiling %s\n" % PHASES[4])
t0 = time()
for j in range(hit_factor):
for i in range(times):
run(i, True, True)
progress(".")
result[4] = (time() - t0) / hit_factor
progress("\n")
return result
def print_result_as_text(result):
for (i, x) in enumerate(PHASES):
print "%-43s %6.2f s (%6.2f %%) (%5.2f x)" % (
x.capitalize() + ":",
result[i],
100 * (result[i] / result[0]),
result[0] / result[i])
def print_result_as_xml(result):
print '<?xml version="1.0" encoding="UTF-8"?>'
print "<ccache-perf>"
for (i, x) in enumerate(PHASES):
print "<measurement>"
print "<name>%s</name>" % x.capitalize()
print "<seconds>%.2f</seconds>" % result[i]
print "<percent>%.2f</percent>" % (100 * (result[i] / result[0]))
print "<times>%.2f</times>" % (result[0] / result[i])
print "</measurement>"
print "</ccache-perf>"
def on_off(x):
return "on" if x else "off"
def find_in_path(cmd):
if isabs(cmd):
return cmd
else:
for path in environ["PATH"].split(":"):
p = joinpath(path, cmd)
if isfile(p) and access(p, X_OK):
return p
return None
def main(argv):
op = OptionParser(usage=USAGE, description=DESCRIPTION)
op.disable_interspersed_args()
op.add_option(
"--ccache",
help="location of ccache (default: %s)" % DEFAULT_CCACHE)
op.add_option(
"--compilercheck",
help="specify compilercheck (default: mtime)")
op.add_option(
"--compression",
help="use compression",
action="store_true")
op.add_option(
"-d", "--directory",
help="where to create the temporary directory with the cache and" \
" other files (default: %s)" \
% DEFAULT_DIRECTORY)
op.add_option(
"--hardlink",
help="use hard links",
action="store_true")
op.add_option(
"--hit-factor",
help="how many times more to compile the file for cache hits (default: %d)" \
% DEFAULT_HIT_FACTOR,
type="int")
op.add_option(
"--nostats",
help="don't write statistics",
action="store_true")
op.add_option(
"-n", "--times",
help="number of times to compile the file (default: %d)" \
% DEFAULT_TIMES,
type="int")
op.add_option(
"-v", "--verbose",
help="print progress messages",
action="store_true")
op.add_option(
"--xml",
help="print result as XML",
action="store_true")
op.set_defaults(
ccache=DEFAULT_CCACHE,
compilercheck="mtime",
directory=DEFAULT_DIRECTORY,
hit_factor=DEFAULT_HIT_FACTOR,
times=DEFAULT_TIMES)
(options, args) = op.parse_args(argv[1:])
if len(args) < 2:
op.error("Missing arguments; pass -h/--help for help")
global verbose
verbose = options.verbose
options.ccache = abspath(options.ccache)
compiler = find_in_path(args[0])
if compiler is None:
op.error("Could not find %s in PATH" % args[0])
if "ccache" in basename(realpath(compiler)):
op.error(
"%s seems to be a symlink to ccache; please specify the path to"
" the real compiler instead" % compiler)
if not options.xml:
print "Compilation command: %s -c -o %s.o" % (
" ".join(args),
splitext(argv[-1])[0])
print "Compilercheck:", options.compilercheck
print "Compression:", on_off(options.compression)
print "Hardlink:", on_off(options.hardlink)
print "Nostats:", on_off(options.nostats)
tmp_dir = "%s/perfdir.%d" % (abspath(options.directory), getpid())
recreate_dir(tmp_dir)
result = test(tmp_dir, options, args[:-1], args[-1])
rmtree(tmp_dir)
if options.xml:
print_result_as_xml(result)
else:
print_result_as_text(result)
main(sys.argv)
|
from setuptools import setup
if __name__ == "__main__":
setup()
|
r"""Run a submission on a single workload.
Example command:
# pylint: disable=line-too-long
python3 submission_runner.py \
--workload=mnist \
--framework=jax \
--submission_path=reference_algorithms/development_algorithms/mnist/mnist_jax/submission.py \
--tuning_ruleset=external \
--tuning_search_space=reference_algorithms/development_algorithms/mnist/tuning_search_space.json \
--num_tuning_trials=3 \
--experiment_dir=/home/znado/experiment_dir \
--experiment_name=baseline
"""
import datetime
import importlib
import json
import os
import struct
import time
from typing import Any, Dict, Optional, Tuple
from absl import app
from absl import flags
from absl import logging
import jax
import tensorflow as tf
import torch
import torch.distributed as dist
from algorithmic_efficiency import checkpoint_utils
from algorithmic_efficiency import halton
from algorithmic_efficiency import logger_utils
from algorithmic_efficiency import random_utils as prng
from algorithmic_efficiency import spec
from algorithmic_efficiency.profiler import PassThroughProfiler
from algorithmic_efficiency.profiler import Profiler
from algorithmic_efficiency.pytorch_utils import pytorch_init
from algorithmic_efficiency.pytorch_utils import pytorch_setup
from algorithmic_efficiency.pytorch_utils import sync_ddp_time
from algorithmic_efficiency.workloads import workloads
# Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
# it unavailable to JAX.
tf.config.set_visible_devices([], 'GPU')
# TODO: disable this only for deepspeech if other workloads work fine without it.
os.environ['XLA_FLAGS'] = '--xla_gpu_enable_triton_gemm=false'
# TODO(znado): make a nicer registry of workloads to look up in.
BASE_WORKLOADS_DIR = workloads.BASE_WORKLOADS_DIR
# Workload_path will be appended by '_pytorch' or '_jax' automatically.
WORKLOADS = workloads.WORKLOADS
flags.DEFINE_string(
'submission_path',
None,
'The relative path of the Python file containing submission functions. '
'NOTE: the submission dir must have an __init__.py file!')
flags.DEFINE_string(
'workload',
None,
help=f'The name of the workload to run.\n Choices: {list(WORKLOADS.keys())}'
)
flags.DEFINE_enum(
'tuning_ruleset',
'external',
enum_values=['external', 'self'],
help='Which tuning ruleset to use.')
flags.DEFINE_string(
'tuning_search_space',
None,
'The path to the JSON file describing the external tuning search space.')
flags.DEFINE_integer('num_tuning_trials',
1,
'The number of external hyperparameter trials to run.')
flags.DEFINE_string('data_dir', '~/data', 'Dataset location.')
flags.DEFINE_string('imagenet_v2_data_dir',
'~/data',
'Dataset location for ImageNet-v2.')
flags.DEFINE_string('librispeech_tokenizer_vocab_path',
'',
'Location to librispeech tokenizer.')
flags.DEFINE_enum(
'framework',
None,
enum_values=['jax', 'pytorch'],
help='Whether to use Jax or Pytorch for the submission. Controls among '
'other things if the Jax or Numpy RNG library is used for RNG.')
flags.DEFINE_boolean(
'torch_compile',
True,
'Whether to use `torch.compile` to JIT-compile PyTorch code. '
'This will only take effect when `framework`==pytorch.')
flags.DEFINE_string(
'experiment_dir',
None,
'The root directory to store all experiments. '
'It is required and the directory should have '
'an absolute path rather than a relative path.')
flags.DEFINE_string('experiment_name', None, 'Name of the experiment.')
flags.DEFINE_boolean(
'save_intermediate_checkpoints',
True,
'Whether to save any intermediate checkpoints. '
'If False, it will only keep the latest checkpoint.')
flags.DEFINE_boolean('resume_last_run',
None,
'Whether to resume the experiment from its last run.')
flags.DEFINE_boolean(
'append_timestamp',
False,
'If True, the current datetime will be appended to the experiment name. '
'Useful for guaranteeing a unique experiment dir for new runs.')
flags.DEFINE_boolean('use_wandb',
False,
'Whether to use Weights & Biases logging.')
flags.DEFINE_boolean('profile', False, 'Whether to produce profiling output.')
flags.DEFINE_integer('max_global_steps',
None,
'Maximum number of update steps.')
flags.DEFINE_boolean(
'overwrite',
False,
    'Whether to overwrite the experiment with identical experiment_dir and '
'experiment_name.')
flags.DEFINE_boolean('save_checkpoints',
True,
'Whether or not to checkpoint the model at every eval.')
FLAGS = flags.FLAGS
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
def _get_time():
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def _get_time_ddp():
torch.cuda.synchronize()
t = time.time()
return sync_ddp_time(t, DEVICE)
if USE_PYTORCH_DDP:
get_time = _get_time_ddp
else:
get_time = _get_time
def train_once(
workload: spec.Workload,
global_batch_size: int,
global_eval_batch_size: int,
data_dir: str,
imagenet_v2_data_dir: str,
init_optimizer_state: spec.InitOptimizerFn,
update_params: spec.UpdateParamsFn,
data_selection: spec.DataSelectionFn,
hyperparameters: Optional[spec.Hyperparameters],
rng: spec.RandomState,
profiler: Profiler,
max_global_steps: int = None,
log_dir: Optional[str] = None,
save_checkpoints: Optional[bool] = True
) -> Tuple[spec.Timing, Dict[str, Any]]:
data_rng, opt_init_rng, model_init_rng, rng = prng.split(rng, 4)
# Workload setup.
logging.info('Initializing dataset.')
with profiler.profile('Initializing dataset'):
input_queue = workload._build_input_queue(
data_rng,
'train',
data_dir=data_dir,
global_batch_size=global_batch_size)
logging.info('Initializing model.')
with profiler.profile('Initializing model'):
dropout_rate = None
aux_dropout_rate = None
if hasattr(hyperparameters, 'dropout_rate'):
dropout_rate = hyperparameters.dropout_rate
if hasattr(hyperparameters, 'aux_dropout_rate'):
aux_dropout_rate = hyperparameters.aux_dropout_rate
model_params, model_state = workload.init_model_fn(
model_init_rng, dropout_rate, aux_dropout_rate)
if FLAGS.framework == 'pytorch' and FLAGS.torch_compile:
compile_error_workloads = ['ogbg', 'librispeech_deepspeech', 'wmt']
eager_backend_workloads = ['librispeech_conformer']
aot_eager_backend_workloads = ['criteo1tb']
if FLAGS.workload in compile_error_workloads:
logging.warning(
'These workloads cannot be fully compiled under current '
'PyTorch version. Proceeding without `torch.compile`.')
elif FLAGS.workload in eager_backend_workloads:
logging.warning(
'These workloads cannot be fully compiled under current '
'PyTorch version. Proceeding with `backend=eager`.')
model_params = torch.compile(model_params, backend='eager')
elif FLAGS.workload in aot_eager_backend_workloads:
logging.warning(
'These workloads cannot be fully compiled under current '
'PyTorch version. Proceeding with `backend=aot_eager`.')
model_params = torch.compile(model_params, backend='aot_eager')
else:
logging.info('Performing `torch.compile`.')
model_params = torch.compile(model_params)
logging.info('Initializing optimizer.')
with profiler.profile('Initializing optimizer'):
optimizer_state = init_optimizer_state(workload,
model_params,
model_state,
hyperparameters,
opt_init_rng)
logging.info('Initializing metrics bundle.')
# Bookkeeping.
train_state = {
'validation_goal_reached': False,
'test_goal_reached': False,
'is_time_remaining': True,
'last_eval_time': 0,
'training_complete': False,
'accumulated_submission_time': 0,
'accumulated_eval_time': 0,
'accumulated_logging_time': 0,
'last_step_end_time': None,
}
global_step = 0
eval_results = []
preemption_count = 0
# Loggers and checkpoint setup.
logging.info('Initializing checkpoint and logger.')
if log_dir is not None:
# If the checkpoint exists, load from the checkpoint.
(optimizer_state,
model_params,
model_state,
train_state,
eval_results,
global_step,
preemption_count) = checkpoint_utils.maybe_restore_checkpoint(
FLAGS.framework,
optimizer_state,
model_params,
model_state,
train_state,
eval_results,
global_step,
preemption_count,
checkpoint_dir=log_dir)
meta_data = logger_utils.get_meta_data(workload)
meta_file_name = os.path.join(log_dir, f'meta_data_{preemption_count}.json')
logging.info(f'Saving meta data to {meta_file_name}.')
logger_utils.write_json(meta_file_name, meta_data)
flag_file_name = os.path.join(log_dir, f'flags_{preemption_count}.json')
logging.info(f'Saving flags to {flag_file_name}.')
logger_utils.write_json(flag_file_name, flags.FLAGS.flag_values_dict())
metrics_logger = logger_utils.set_up_loggers(log_dir,
flags.FLAGS,
hyperparameters)
workload.attach_metrics_logger(metrics_logger)
global_start_time = get_time()
train_state['last_step_end_time'] = global_start_time
logging.info('Starting training loop.')
goals_reached = (
train_state['validation_goal_reached'] and
train_state['test_goal_reached'])
while train_state['is_time_remaining'] and \
not goals_reached and \
not train_state['training_complete']:
step_rng = prng.fold_in(rng, global_step)
data_select_rng, update_rng, eval_rng = prng.split(step_rng, 3)
with profiler.profile('Data selection'):
batch = data_selection(workload,
input_queue,
optimizer_state,
model_params,
model_state,
hyperparameters,
global_step,
data_select_rng)
try:
with profiler.profile('Update parameters'):
optimizer_state, model_params, model_state = update_params(
workload=workload,
current_param_container=model_params,
current_params_types=workload.model_params_types,
model_state=model_state,
hyperparameters=hyperparameters,
batch=batch,
loss_type=workload.loss_type,
optimizer_state=optimizer_state,
eval_results=eval_results,
global_step=global_step,
rng=update_rng)
except spec.TrainingCompleteError:
train_state['training_complete'] = True
global_step += 1
if (max_global_steps is not None) and (global_step == max_global_steps):
train_state['training_complete'] = True
train_step_end_time = get_time()
train_state['accumulated_submission_time'] += (
train_step_end_time - train_state['last_step_end_time'])
train_state['is_time_remaining'] = (
train_state['accumulated_submission_time'] <
workload.max_allowed_runtime_sec)
# Check if submission is eligible for an untimed eval.
if ((train_step_end_time - train_state['last_eval_time']) >=
workload.eval_period_time_sec or train_state['training_complete']):
with profiler.profile('Evaluation'):
try:
eval_start_time = get_time()
latest_eval_result = workload.eval_model(global_eval_batch_size,
model_params,
model_state,
eval_rng,
data_dir,
imagenet_v2_data_dir,
global_step)
# Check if targets reached
train_state['validation_goal_reached'] = (
workload.has_reached_validation_target(latest_eval_result) or
train_state['validation_goal_reached'])
train_state['test_goal_reached'] = (
workload.has_reached_test_target(latest_eval_result) or
train_state['test_goal_reached'])
# Save last eval time
eval_end_time = get_time()
train_state['last_eval_time'] = eval_end_time
# Accumulate eval time
train_state[
'accumulated_eval_time'] += eval_end_time - eval_start_time
# Add times to eval results for logging
latest_eval_result['score'] = (
train_state['accumulated_submission_time'])
latest_eval_result[
'total_duration'] = eval_end_time - global_start_time
latest_eval_result['accumulated_submission_time'] = train_state[
'accumulated_submission_time']
latest_eval_result['accumulated_eval_time'] = train_state[
'accumulated_eval_time']
latest_eval_result['accumulated_logging_time'] = train_state[
'accumulated_logging_time']
time_since_start = latest_eval_result['total_duration']
logging.info(f'Time since start: {time_since_start:.2f}s, '
f'\tStep: {global_step}, \t{latest_eval_result}')
eval_results.append((global_step, latest_eval_result))
logging_start_time = get_time()
if log_dir is not None:
metrics_logger.append_scalar_metrics(
latest_eval_result,
global_step=global_step,
preemption_count=preemption_count,
is_eval=True,
)
if save_checkpoints:
checkpoint_utils.save_checkpoint(
framework=FLAGS.framework,
optimizer_state=optimizer_state,
model_params=model_params,
model_state=model_state,
train_state=train_state,
eval_results=eval_results,
global_step=global_step,
preemption_count=preemption_count,
checkpoint_dir=log_dir,
save_intermediate_checkpoints=FLAGS
.save_intermediate_checkpoints)
if FLAGS.framework == 'pytorch' and torch.cuda.is_available():
torch.cuda.empty_cache()
logging_end_time = get_time()
train_state['accumulated_logging_time'] += (
logging_end_time - logging_start_time)
except RuntimeError as e:
logging.exception(f'Eval step {global_step} error.\n')
if 'out of memory' in str(e):
logging.warning('Error: GPU out of memory during eval during step '
f'{global_step}, error : {str(e)}.')
if torch.cuda.is_available():
torch.cuda.empty_cache()
train_state['last_step_end_time'] = get_time()
metrics = {'eval_results': eval_results, 'global_step': global_step}
if log_dir is not None:
metrics_logger.append_scalar_metrics(
{'score': train_state['accumulated_submission_time']},
global_step=global_step,
preemption_count=preemption_count)
metrics_logger.finish()
checkpoint_utils.save_checkpoint(
framework=FLAGS.framework,
optimizer_state=optimizer_state,
model_params=model_params,
model_state=model_state,
train_state=train_state,
eval_results=eval_results,
global_step=global_step,
preemption_count=preemption_count,
checkpoint_dir=log_dir,
save_intermediate_checkpoints=FLAGS.save_intermediate_checkpoints)
return train_state['accumulated_submission_time'], metrics
def score_submission_on_workload(workload: spec.Workload,
workload_name: str,
submission_path: str,
data_dir: str,
tuning_ruleset: str,
profiler: Optional[Profiler] = None,
max_global_steps: Optional[int] = None,
imagenet_v2_data_dir: Optional[str] = None,
tuning_search_space: Optional[str] = None,
num_tuning_trials: Optional[int] = None,
log_dir: Optional[str] = None,
save_checkpoints: Optional[bool] = True):
# Expand paths because '~' may not be recognized
data_dir = os.path.expanduser(data_dir)
if imagenet_v2_data_dir:
imagenet_v2_data_dir = os.path.expanduser(imagenet_v2_data_dir)
# Remove the trailing '.py' and convert the filepath to a Python module.
submission_module_path = workloads.convert_filepath_to_module(submission_path)
submission_module = importlib.import_module(submission_module_path)
init_optimizer_state = submission_module.init_optimizer_state
update_params = submission_module.update_params
data_selection = submission_module.data_selection
global_batch_size = submission_module.get_batch_size(workload_name)
# n_gpus has to be set here, because we cannot call the first Jax operation
# before pytorch_init().
n_gpus = max(N_GPUS, jax.local_device_count())
if global_batch_size % n_gpus != 0:
raise ValueError(
f'The global batch size ({global_batch_size}) has to be divisible by '
f'the number of GPUs ({n_gpus}).')
if hasattr(submission_module, 'get_eval_batch_size'):
# If the user specifies the eval batch size, use the provided one.
global_eval_batch_size = submission_module.get_eval_batch_size(
workload_name)
else:
global_eval_batch_size = workload.eval_batch_size
if global_eval_batch_size % n_gpus != 0:
raise ValueError(
f'The global eval batch size ({global_eval_batch_size}) has to be '
f'divisible by the number of GPUs ({n_gpus}).')
if tuning_ruleset == 'external':
# If the submission runner is responsible for hyperparameter tuning, load in
# the search space and generate a list of randomly selected hyperparameter
# settings from it.
if tuning_search_space is None:
raise ValueError(
'Must provide a tuning search space JSON file when using external '
'tuning.')
with open(tuning_search_space, 'r', encoding='UTF-8') as search_space_file:
tuning_search_space = halton.generate_search(
json.load(search_space_file), num_tuning_trials)
all_timings = []
all_metrics = []
for hi, hyperparameters in enumerate(tuning_search_space):
# Generate a new seed from hardware sources of randomness for each trial.
rng_seed = struct.unpack('I', os.urandom(4))[0]
logging.info('Using RNG seed %d', rng_seed)
rng = prng.PRNGKey(rng_seed)
# Because we initialize the PRNGKey with only a single 32 bit int, in the
# Jax implementation this means that rng[0] is all zeros, which means this
      # could lead to unintentionally reusing the same seed if only rng[0] were
# ever used. By splitting the rng into 2, we mix the lower and upper 32
# bit ints, ensuring we can safely use either rng[0] or rng[1] as a random
# number.
rng, _ = prng.split(rng, 2)
logging.info(f'--- Tuning run {hi + 1}/{num_tuning_trials} ---')
tuning_dir_name = None
if log_dir is not None:
tuning_dir_name = os.path.join(log_dir, f'trial_{hi + 1}')
logging.info(f'Creating tuning directory at {tuning_dir_name}.')
logger_utils.makedir(tuning_dir_name)
      # If saved hyperparameters already exist for this trial, reuse them
      # for consistency.
hyperparameters = logger_utils.write_hparams(hyperparameters,
tuning_dir_name)
tuning_search_space[hi] = hyperparameters
with profiler.profile('Train'):
if 'imagenet' not in workload_name:
imagenet_v2_data_dir = None
timing, metrics = train_once(workload, global_batch_size,
global_eval_batch_size,
data_dir, imagenet_v2_data_dir,
init_optimizer_state,
update_params, data_selection,
hyperparameters, rng,
profiler,
max_global_steps,
tuning_dir_name,
save_checkpoints=save_checkpoints,)
all_timings.append(timing)
all_metrics.append(metrics)
score = min(all_timings)
for ti in range(num_tuning_trials):
logging.info(f'Tuning trial {ti + 1}/{num_tuning_trials}')
logging.info(f'Hyperparameters: {tuning_search_space[ti]}')
logging.info(f'Metrics: {all_metrics[ti]}')
logging.info(f'Timing: {all_timings[ti]}')
num_evals = len(all_metrics[ti]['eval_results'])
logging.info(f'Total number of evals: {num_evals}')
logging.info('=' * 20)
else:
rng_seed = struct.unpack('q', os.urandom(8))[0]
rng = prng.PRNGKey(rng_seed)
# If the submission is responsible for tuning itself, we only need to run it
# once and return the total time.
with profiler.profile('Train'):
score, _ = train_once(
workload, global_batch_size, global_eval_batch_size,
data_dir, imagenet_v2_data_dir,
init_optimizer_state, update_params, data_selection,
None, rng, profiler, max_global_steps, log_dir,
save_checkpoints=save_checkpoints)
return score
def main(_):
if FLAGS.profile:
profiler = Profiler()
else:
profiler = PassThroughProfiler()
if FLAGS.framework == 'pytorch':
pytorch_init(USE_PYTORCH_DDP, RANK, profiler)
workload_metadata = WORKLOADS[FLAGS.workload]
# Prevent OOM on librispeech conformer.
if FLAGS.workload == 'librispeech_conformer':
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.85'
# Extend path according to framework.
workload_metadata['workload_path'] = os.path.join(
BASE_WORKLOADS_DIR,
workload_metadata['workload_path'] + f'_{FLAGS.framework}',
'workload.py')
workload_init_kwargs = {}
if FLAGS.librispeech_tokenizer_vocab_path:
workload_init_kwargs['tokenizer_vocab_path'] = (
FLAGS.librispeech_tokenizer_vocab_path)
workload = workloads.import_workload(
workload_path=workload_metadata['workload_path'],
workload_class_name=workload_metadata['workload_class_name'],
workload_init_kwargs=workload_init_kwargs)
experiment_name = FLAGS.experiment_name
if experiment_name and FLAGS.append_timestamp:
experiment_name += datetime.datetime.now().strftime('-%Y-%m-%d-%H-%M-%S')
logging_dir_path = logger_utils.get_log_dir(FLAGS.experiment_dir,
FLAGS.workload,
FLAGS.framework,
experiment_name,
FLAGS.resume_last_run,
FLAGS.overwrite)
score = score_submission_on_workload(
workload=workload,
workload_name=FLAGS.workload,
submission_path=FLAGS.submission_path,
data_dir=FLAGS.data_dir,
tuning_ruleset=FLAGS.tuning_ruleset,
profiler=profiler,
max_global_steps=FLAGS.max_global_steps,
imagenet_v2_data_dir=FLAGS.imagenet_v2_data_dir,
tuning_search_space=FLAGS.tuning_search_space,
num_tuning_trials=FLAGS.num_tuning_trials,
log_dir=logging_dir_path,
save_checkpoints=FLAGS.save_checkpoints)
logging.info(f'Final {FLAGS.workload} score: {score}')
if FLAGS.profile:
logging.info(profiler.summary())
if USE_PYTORCH_DDP:
# Cleanup.
dist.destroy_process_group()
if __name__ == '__main__':
flags.mark_flag_as_required('workload')
flags.mark_flag_as_required('framework')
flags.mark_flag_as_required('submission_path')
flags.mark_flag_as_required('experiment_dir')
flags.mark_flag_as_required('experiment_name')
app.run(main)
|
import jax
print('JAX identified %d GPU devices' % jax.local_device_count())
print('Generating RNG seed for CUDA sanity check ... ')
rng = jax.random.PRNGKey(0)
data_rng, shuffle_rng = jax.random.split(rng, 2)
if jax.local_device_count() == 8 and data_rng is not None:
print('Woohoo 8 GPUs present and CUDA works!!')
|
import jax.dlpack
import torch
from algorithmic_efficiency import spec
def jax_to_pytorch(x: spec.Tensor, take_ownership: bool = False) -> spec.Tensor:
return torch.utils.dlpack.from_dlpack(
jax.dlpack.to_dlpack(x, take_ownership=take_ownership))
def pytorch_to_jax(x: torch.Tensor) -> spec.Tensor:
x = x.contiguous() # https://github.com/google/jax/issues/8082
return jax.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(x))
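# A minimal round-trip sketch (illustrative; `_demo_dlpack_roundtrip` is not
# part of the original module): convert a PyTorch tensor to a JAX array and
# back via DLPack using the two helpers above.
def _demo_dlpack_roundtrip(t: torch.Tensor) -> torch.Tensor:
  jax_array = pytorch_to_jax(t)
  return jax_to_pytorch(jax_array)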
|
import os
from typing import Tuple
from absl import logging
import jax
import tensorflow as tf
import torch
import torch.distributed as dist
from algorithmic_efficiency import spec
from algorithmic_efficiency.profiler import Profiler
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
BatchNorm as ConformerBatchNorm
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
BatchNorm as DeepspeechBatchNorm
def pytorch_setup() -> Tuple[bool, int, torch.device, int]:
use_pytorch_ddp = 'LOCAL_RANK' in os.environ
rank = int(os.environ['LOCAL_RANK']) if use_pytorch_ddp else 0
device = torch.device(f'cuda:{rank}' if torch.cuda.is_available() else 'cpu')
n_gpus = torch.cuda.device_count()
return use_pytorch_ddp, rank, device, n_gpus
def pytorch_init(use_pytorch_ddp: bool, rank: int, profiler: Profiler) -> None:
# Make sure no GPU memory is preallocated to Jax.
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = 'false'
# Only use CPU for Jax to avoid memory issues.
# Setting the corresponding environment variable here has no effect; it has to
# be done before jax and tensorflow (!) are imported for the first time.
jax.config.update('jax_platforms', 'cpu')
# From the docs: "(...) causes cuDNN to benchmark multiple convolution
# algorithms and select the fastest."
torch.backends.cudnn.benchmark = True
if use_pytorch_ddp:
# Avoid tf input pipeline creating too many threads.
if rank != 0:
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(1)
torch.cuda.set_device(rank)
profiler.set_local_rank(rank)
# Only log once (for local rank == 0).
if rank != 0:
def logging_pass(*args):
pass
logging.info = logging_pass
# Initialize the process group.
dist.init_process_group('nccl')
def sync_ddp_time(time: float, device: torch.device) -> float:
time_tensor = torch.tensor(time, dtype=torch.float64, device=device)
dist.all_reduce(time_tensor, op=dist.ReduceOp.MAX)
return time_tensor.item()
def update_batch_norm_fn(module: spec.ParameterContainer,
update_batch_norm: bool) -> None:
bn_layers = (
torch.nn.modules.batchnorm._BatchNorm, # PyTorch BN base class.
ConformerBatchNorm, # Custom BN class for conformer model.
DeepspeechBatchNorm, # Custom BN class for deepspeech model.
)
if isinstance(module, bn_layers):
if not update_batch_norm:
module.eval()
module.momentum_backup = module.momentum
# module.momentum can be float or torch.Tensor.
module.momentum = 0. * module.momentum_backup
elif hasattr(module, 'momentum_backup'):
module.momentum = module.momentum_backup
module.track_running_stats = update_batch_norm
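# Usage sketch (illustrative; `_demo_freeze_batch_norm` is not part of the
# original module): apply the hook to every submodule so batch-norm layers stop
# updating their running statistics.
def _demo_freeze_batch_norm(model: torch.nn.Module) -> None:
  model.apply(lambda m: update_batch_norm_fn(m, update_batch_norm=False))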
|
"""Proxy functions in front of the Jax RNG API or a compatible Numpy RNG API."""
from typing import Any, List, Union
from absl import flags
from absl import logging
import numpy as np
try:
import jax.random as jax_rng
except (ImportError, ModuleNotFoundError):
logging.warning(
'Could not import jax.random for the submission runner, falling back to '
'numpy random_utils.')
jax_rng = None
FLAGS = flags.FLAGS
# Annoyingly, RandomState(seed) requires seed to be in [0, 2 ** 32 - 1] (an
# unsigned int), while RandomState.randint only accepts and returns signed ints.
MAX_INT32 = 2**31
MIN_INT32 = -MAX_INT32
SeedType = Union[int, list, np.ndarray]
def _signed_to_unsigned(seed: SeedType) -> SeedType:
if isinstance(seed, int):
return seed + 2**32 if seed < 0 else seed
if isinstance(seed, list):
return [s + 2**32 if s < 0 else s for s in seed]
if isinstance(seed, np.ndarray):
return np.array([s + 2**32 if s < 0 else s for s in seed.tolist()])
def _fold_in(seed: SeedType, data: Any) -> List[Union[SeedType, Any]]:
rng = np.random.RandomState(seed=_signed_to_unsigned(seed))
new_seed = rng.randint(MIN_INT32, MAX_INT32, dtype=np.int32)
return [new_seed, data]
def _split(seed: SeedType, num: int = 2) -> SeedType:
rng = np.random.RandomState(seed=_signed_to_unsigned(seed))
return rng.randint(MIN_INT32, MAX_INT32, dtype=np.int32, size=[num, 2])
def _PRNGKey(seed: SeedType) -> SeedType: # pylint: disable=invalid-name
return split(seed, num=2)[0]
# It is usually bad practice to use FLAGS outside of the main() function, but
# the alternative is having to pipe the framework flag to all functions that may
# need it, which seems unnecessarily cumbersome.
def _check_jax_install() -> None:
if jax_rng is None:
raise ValueError(
'Must install jax to use the jax RNG library, or use PyTorch and pass '
'--framework=pytorch to use the Numpy version instead.')
def fold_in(seed: SeedType, data: Any) -> List[Union[SeedType, Any]]:
if FLAGS.framework == 'jax':
_check_jax_install()
return jax_rng.fold_in(seed, data)
return _fold_in(seed, data)
def split(seed: SeedType, num: int = 2) -> SeedType:
if FLAGS.framework == 'jax':
_check_jax_install()
return jax_rng.split(seed, num)
return _split(seed, num)
def PRNGKey(seed: SeedType) -> SeedType: # pylint: disable=invalid-name
if FLAGS.framework == 'jax':
_check_jax_install()
return jax_rng.PRNGKey(seed)
return _PRNGKey(seed)
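# A minimal usage sketch (illustrative; `_demo_rng_usage` is not part of the
# original module). It assumes flags have been parsed so FLAGS.framework is set.
def _demo_rng_usage(seed: int = 1234):
  rng = PRNGKey(seed)              # jax PRNG key, or a numpy array of two ints
  rng, step_rng = split(rng, 2)    # derive independent streams
  step_rng = fold_in(step_rng, 7)  # e.g. mix in the global step
  return rng, step_rng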
|
"""Algorithmic Efficiency."""
__version__ = '0.0.1'
|
"""MLPerf™ Algorithmic Efficiency API."""
import abc
import enum
import functools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
from absl import logging
import jax
from torch import nn
import torch.nn.functional as F
class LossType(enum.Enum):
SOFTMAX_CROSS_ENTROPY = 0
SIGMOID_CROSS_ENTROPY = 1
MEAN_SQUARED_ERROR = 2
CTC_LOSS = 3
MEAN_ABSOLUTE_ERROR = 4
class ForwardPassMode(enum.Enum):
TRAIN = 0
EVAL = 1
# ... ?
class ParameterType(enum.Enum):
WEIGHT = 0
BIAS = 1
CONV_WEIGHT = 2
BATCH_NORM_SCALE = 3
BATCH_NORM_BIAS = 4
LAYER_NORM_SCALE = 5
LAYER_NORM_BIAS = 6
EMBEDDING = 7
ATTENTION_Q = 8
ATTENTION_K = 9
ATTENTION_V = 10
ATTENTION_OUT = 11
ATTENTION_QKV = 12 # This is used for implementations that fuse QKV together.
# We need to split this out because otherwise fused QKV models will have a
# different number of biases.
ATTENTION_BIAS = 13
# Of course, Tensor knows its shape and dtype.
# Tensor = Union[jnp.array, np.array, tf.Tensor, torch.Tensor, ...]
Tensor = Any
# Define this so that, when using pytree iteration utilities, we can iterate
# over the model shapes pytree without iterating over the shape tuples.
class ShapeTuple:
def __init__(self, shape_tuple):
self.shape_tuple = shape_tuple
def __repr__(self):
return f'ShapeTuple({self.shape_tuple})'
def __eq__(self, other):
return self.shape_tuple == other.shape_tuple
Shape = Union[Tuple[int],
Tuple[int, int],
Tuple[int, int, int],
Tuple[int, int, int, int],
ShapeTuple]
ParameterShapeTree = Dict[str, Dict[str, Shape]]
# If necessary, these can be zipped together easily given they have the same
# structure, to get an iterator over pairs of leaves.
ParameterKey = str
# Dicts can be arbitrarily nested.
ParameterContainer = Union[Dict[ParameterKey, Dict[ParameterKey, Tensor]],
nn.Module]
ParameterTypeTree = Dict[ParameterKey, Dict[ParameterKey, ParameterType]]
RandomState = Any # Union[jax.random.PRNGKey, int, bytes, ...]
OptimizerState = Union[Dict[str, Any], Tuple[Any, Any]]
Hyperparameters = Any
Timing = int
Steps = int
# BN EMAs.
ModelAuxiliaryState = Any
ModelInitState = Tuple[ParameterContainer, ModelAuxiliaryState]
class Workload(metaclass=abc.ABCMeta):
def __init__(self, *args, **kwargs) -> None:
del args
del kwargs
self._param_shapes: Optional[ParameterShapeTree] = None
self._param_types: Optional[ParameterTypeTree] = None
self._eval_iters: Dict[str, Iterator] = {}
self.metrics_logger = None
@property
@abc.abstractmethod
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
@abc.abstractmethod
def has_reached_validation_target(self, eval_result: Dict[str,
float]) -> bool:
"""Return whether or not the workload validation goal has been reached."""
@abc.abstractmethod
def has_reached_test_target(self, eval_result: Dict[str, float]) -> bool:
"""Return whether or not the workload test goal has been reached."""
@abc.abstractmethod
def _build_input_queue(
self,
data_rng: RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, Any]]:
"""Build the input queue for the workload data.
This is the only function that is NOT allowed to be called by submitters.
    For Jax this should return an iterator over tensors of shape
(num_devices, per_device_batch_size, ...), and for PyTorch this should
return tensors of shape (global_batch_size, ...).
The required keys are 'inputs' and 'targets', and in general the naming
convention should be plural key names because the values are batches of
examples.
"""
def attach_metrics_logger(self, metrics_logger) -> None:
"""Attaches a metric logger to workload."""
self.metrics_logger = metrics_logger
return
@property
@abc.abstractmethod
def validation_target_value(self) -> float:
"""The validation target value to reach."""
@property
@abc.abstractmethod
def test_target_value(self) -> float:
"""The test target value to reach."""
@property
@abc.abstractmethod
def loss_type(self) -> LossType:
"""The type of loss function."""
@property
@abc.abstractmethod
def num_train_examples(self) -> int:
"""The size of the training set."""
@property
@abc.abstractmethod
def eval_batch_size(self) -> int:
"""The batch size for evaluation."""
@property
@abc.abstractmethod
def num_eval_train_examples(self) -> int:
"""The number of training examples to evaluate metrics on."""
@property
@abc.abstractmethod
def num_validation_examples(self) -> int:
"""The size of the validation set."""
@property
@abc.abstractmethod
def num_test_examples(self) -> int:
"""The size of the test set."""
@property
@abc.abstractmethod
def train_mean(self) -> Any:
"""The mean of the training data."""
@property
@abc.abstractmethod
def train_stddev(self) -> Any:
"""The stddev of the training data."""
@property
@abc.abstractmethod
def max_allowed_runtime_sec(self) -> int:
"""The max allowed runtime of the workload in seconds."""
@property
@abc.abstractmethod
def eval_period_time_sec(self) -> int:
"""The eval period of the workload in seconds."""
@property
@abc.abstractmethod
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
@property
def param_shapes(self):
"""The shapes of the parameters in the workload model."""
if self._param_shapes is None:
raise ValueError(
'This should not happen, workload.init_model_fn() should be called '
'before workload.param_shapes!')
return self._param_shapes
@property
def model_params_types(self):
"""The types of the parameters in the workload model."""
if self._param_types is None:
raise ValueError(
'This should not happen, workload.init_model_fn() should be called '
'before workload.param_types!')
return self._param_types
@abc.abstractmethod
def is_output_params(self, param_key: ParameterKey) -> bool:
"""Whether a key in ParameterContainer is the output layer parameters."""
# InitModelFn = Callable[
# Tuple[RandomState, Optional[float], Optional[float]],
# ParameterContainer]
@abc.abstractmethod
def init_model_fn(self,
rng: RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> ModelInitState:
"""Return (initial_params, initial_model_state)."""
# ModelFn = Callable[
# Tuple[
# ParameterContainer,
# Dict[str, Tensor],
# ModelAuxiliaryState,
# ForwardPassMode,
# RandomState,
# bool],
# Tensor]
@abc.abstractmethod
def model_fn(self,
params: ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, Tensor],
model_state: ModelAuxiliaryState,
mode: ForwardPassMode,
rng: RandomState,
update_batch_norm: bool) -> Tuple[Tensor, ModelAuxiliaryState]:
"""Return logits_batch"""
# Possible side effect of updating BN.
def output_activation_fn(self, logits_batch: Tensor,
framework: str) -> Tensor:
"""Turn logits into probabilities, according to the loss_type property."""
if framework not in ['pytorch', 'jax']:
raise ValueError(
f'`framework` has to be either `pytorch` or `jax`, got {framework}.')
activation_fn = {
LossType.MEAN_SQUARED_ERROR: lambda z: z,
LossType.MEAN_ABSOLUTE_ERROR: lambda z: z,
}
is_pytorch = framework == 'pytorch' # If False, framework == 'jax'.
softmax_fn = (
functools.partial(F.softmax, dim=-1) if is_pytorch else jax.nn.softmax)
sigmoid_fn = F.sigmoid if is_pytorch else jax.nn.sigmoid
activation_fn[LossType.SOFTMAX_CROSS_ENTROPY] = softmax_fn
activation_fn[LossType.SIGMOID_CROSS_ENTROPY] = sigmoid_fn
activation_fn[LossType.CTC_LOSS] = softmax_fn
return activation_fn[self.loss_type](logits_batch)
# LossFn = Callable[Tuple[Tensor, Tensor], Tensor]
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
@abc.abstractmethod
def loss_fn(
self,
# Dense or one-hot labels, or a tuple of (tensor, padding) for speech.
label_batch: Union[Tuple[Tensor, Tensor], Tensor],
logits_batch: Union[Tuple[Tensor, Tensor], Tensor],
mask_batch: Optional[Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
@abc.abstractmethod
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: ParameterContainer,
model_state: ModelAuxiliaryState,
rng: RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Evaluate the model on a given dataset split, return final scalars."""
def eval_model(self,
global_batch_size: int,
params: ParameterContainer,
model_state: ModelAuxiliaryState,
rng: RandomState,
data_dir: str,
imagenet_v2_data_dir: Optional[str],
global_step: int) -> Dict[str, float]:
"""Run a full evaluation of the model."""
logging.info('Evaluating on the training split.')
train_metrics = self._eval_model_on_split(
split='eval_train',
num_examples=self.num_eval_train_examples,
global_batch_size=global_batch_size,
params=params,
model_state=model_state,
rng=rng,
data_dir=data_dir,
global_step=global_step)
eval_metrics = {'train/' + k: v for k, v in train_metrics.items()}
# We always require a validation set.
logging.info('Evaluating on the validation split.')
validation_metrics = self._eval_model_on_split(
'validation',
num_examples=self.num_validation_examples,
global_batch_size=global_batch_size,
params=params,
model_state=model_state,
rng=rng,
data_dir=data_dir,
global_step=global_step)
for k, v in validation_metrics.items():
eval_metrics['validation/' + k] = v
eval_metrics['validation/num_examples'] = self.num_validation_examples
# Evaluate on the test set. TODO(znado): always eval on the test set.
try:
if self.num_test_examples is not None:
logging.info('Evaluating on the test split.')
test_metrics = self._eval_model_on_split(
'test',
num_examples=self.num_test_examples,
global_batch_size=global_batch_size,
params=params,
model_state=model_state,
rng=rng,
data_dir=imagenet_v2_data_dir if imagenet_v2_data_dir else data_dir,
global_step=global_step)
for k, v in test_metrics.items():
eval_metrics['test/' + k] = v
eval_metrics['test/num_examples'] = self.num_test_examples
except NotImplementedError:
pass
return eval_metrics
class TrainingCompleteError(Exception):
pass
# Training algorithm track submission functions, to be filled in by the
# submitter.
InitOptimizerFn = Callable[[
Workload,
ParameterContainer,
ModelAuxiliaryState,
Hyperparameters,
RandomState
],
OptimizerState]
def init_optimizer_state(workload: Workload,
model_params: ParameterContainer,
model_state: ModelAuxiliaryState,
hyperparameters: Hyperparameters,
rng: RandomState) -> OptimizerState:
# return initial_optimizer_state
pass
UpdateReturn = Tuple[OptimizerState, ParameterContainer, ModelAuxiliaryState]
UpdateParamsFn = Callable[[
Workload,
ParameterContainer,
ParameterTypeTree,
ModelAuxiliaryState,
Hyperparameters,
Dict[str, Tensor],
LossType,
OptimizerState,
List[Tuple[int, float]],
int,
RandomState
],
UpdateReturn]
# Each call to this function is considered a "step".
# Can raise a TrainingCompleteError if it believes it has achieved the goal and
# wants to end the run and receive a final free eval. It will not be restarted,
# and if it has not actually achieved the goal then it will be considered as not
# having achieved the goal and will get an infinite time score. Most submissions will likely
# wait until the next free eval and not use this functionality.
def update_params(workload: Workload,
current_param_container: ParameterContainer,
current_params_types: ParameterTypeTree,
model_state: ModelAuxiliaryState,
hyperparameters: Hyperparameters,
batch: Dict[str, Tensor],
loss_type: LossType,
optimizer_state: OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: RandomState) -> UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
pass
DataSelectionFn = Callable[[
Workload,
Iterator[Dict[str, Any]],
OptimizerState,
ParameterContainer,
LossType,
Hyperparameters,
int,
RandomState
],
Tuple[Tensor, Tensor]]
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: Workload,
input_queue: Iterator[Dict[str, Any]],
optimizer_state: OptimizerState,
current_param_container: ParameterContainer,
model_state: ModelAuxiliaryState,
hyperparameters: Hyperparameters,
global_step: int,
rng: RandomState) -> Dict[str, Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
# return next(input_queue)
pass
def get_batch_size(workload_name: str) -> int:
"""Return the global batch size to use for a given workload."""
pass
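# A minimal submission-module sketch (illustrative only, not from this file):
# a concrete submission provides real implementations of the entry points
# stubbed above (get_batch_size, init_optimizer_state, update_params and
# data_selection), with exactly the signatures defined in this module; the
# submission runner imports that module and calls them each step.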
|
"""Utilities for initializing parameters.
Note: Code adapted from
https://github.com/google/jax/blob/main/jax/_src/nn/initializers.py.
"""
import math
from torch import nn
def pytorch_default_init(module: nn.Module) -> None:
# Perform lecun_normal initialization.
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
std = math.sqrt(1. / fan_in) / .87962566103423978
nn.init.trunc_normal_(module.weight, std=std)
if module.bias is not None:
nn.init.constant_(module.bias, 0.)
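# Usage sketch (illustrative; `_demo_default_init` is not part of the original
# module): apply the LeCun-normal default initialization to a fresh linear layer.
def _demo_default_init() -> nn.Module:
  layer = nn.Linear(128, 64)
  pytorch_default_init(layer)
  return layer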
|
"""Hyperparameter sweeps with Halton sequences of quasi-random numbers.
Based on the algorithms described in https://arxiv.org/abs/1706.03200. Inspired
by the code in
https://github.com/google/uncertainty-baselines/blob/master/uncertainty_baselines/halton.py
written by the same authors.
"""
import collections
import functools
import itertools
import math
from typing import Any, Callable, Dict, List, Sequence, Text, Tuple, Union
from absl import logging
from numpy import random
_SweepSequence = List[Dict[Text, Any]]
_GeneratorFn = Callable[[float], Tuple[Text, float]]
def generate_primes(n: int) -> List[int]:
"""Generate primes less than `n` (except 2) using the Sieve of Sundaram."""
half_m1 = int((n - 2) / 2)
sieve = [0] * (half_m1 + 1)
for outer in range(1, half_m1 + 1):
inner = outer
while outer + inner + 2 * outer * inner <= half_m1:
sieve[outer + inner + (2 * outer * inner)] = 1
inner += 1
return [2 * i + 1 for i in range(1, half_m1 + 1) if sieve[i] == 0]
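# For example (hand-checked): generate_primes(20) returns
# [3, 5, 7, 11, 13, 17, 19], i.e. every odd prime below 20; 2 is excluded by
# construction.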
def _is_prime(n: int) -> bool:
"""Check if `n` is a prime number."""
return all(n % i != 0 for i in range(2, int(n**0.5) + 1)) and n != 2
def _generate_dim(num_samples: int,
base: int,
per_dim_shift: bool,
shuffled_seed_sequence: List[int]) -> List[float]:
"""Generate `num_samples` from a Van der Corput sequence with base `base`.
Args:
num_samples: int, the number of samples to generate.
base: int, the base for the Van der Corput sequence. Must be prime.
per_dim_shift: boolean, if true then each dim in the sequence is shifted by
a random float (and then passed through fmod(n, 1.0) to keep in the range
[0, 1)).
shuffled_seed_sequence: An optional list of length `base`, used as the input
sequence to generate samples. Useful for deterministic testing.
Returns:
A shuffled Van der Corput sequence of length `num_samples`, and optionally a
shift added to each dimension.
Raises:
ValueError: if `base` is negative or not prime.
"""
if base < 0 or not _is_prime(base):
raise ValueError('Each Van der Corput sequence requires a prime `base`, '
f'received {base}.')
rng = random.RandomState(base)
if shuffled_seed_sequence is None:
shuffled_seed_sequence = list(range(1, base))
# np.random.RandomState uses MT19937 (see
# https://numpy.org/devdocs/reference/random/legacy.html#numpy.random.RandomState).
rng.shuffle(shuffled_seed_sequence)
shuffled_seed_sequence = [0] + shuffled_seed_sequence
# Optionally generate a random float in the range [0, 1) to shift this
# dimension by.
dim_shift = rng.random_sample() if per_dim_shift else None
dim_sequence = []
for i in range(1, num_samples + 1):
num = 0.
denominator = base
while i:
num += shuffled_seed_sequence[i % base] / denominator
denominator *= base
i //= base
if per_dim_shift:
num = math.fmod(num + dim_shift, 1.0)
dim_sequence.append(num)
return dim_sequence
Matrix = List[List[int]]
def generate_sequence(num_samples: int,
num_dims: int,
skip: int = 100,
per_dim_shift: bool = True,
shuffle_sequence: bool = True,
primes: Sequence[int] = None,
shuffled_seed_sequence: Matrix = None) -> Matrix:
"""Generate `num_samples` from a Halton sequence of dimension `num_dims`.
Each dimension is generated independently from a shuffled Van der Corput
sequence with a different base prime, and an optional shift added. The
generated points are, by default, shuffled before returning.
Args:
num_samples: int, the number of samples to generate.
num_dims: int, the number of dimensions per generated sample.
skip: non-negative int, if positive then a sequence is generated and the
first `skip` samples are discarded in order to avoid unwanted
correlations.
per_dim_shift: boolean, if true then each dim in the sequence is shifted by
a random float (and then passed through fmod(n, 1.0) to keep in the range
[0, 1)).
shuffle_sequence: boolean, if true then shuffle the sequence before
returning.
primes: An optional sequence (of length `num_dims`) of prime numbers to use
as the base for the Van der Corput sequence for each dimension. Useful for
deterministic testing.
shuffled_seed_sequence: An optional list of length `num_dims`, with each
element being a sequence of length `primes[d]`, used as the input sequence
to the Van der Corput sequence for each dimension. Useful for
deterministic testing.
Returns:
A shuffled Halton sequence of length `num_samples`, where each sample has
`num_dims` dimensions, and optionally a shift added to each dimension.
Raises:
ValueError: if `skip` is negative.
ValueError: if `primes` is provided and not of length `num_dims`.
ValueError: if `shuffled_seed_sequence` is provided and not of length
`num_dims`.
ValueError: if `shuffled_seed_sequence[d]` is provided and not of length
`primes[d]` for any d in range(num_dims).
"""
if skip < 0:
raise ValueError(f'Skip must be non-negative, received: {skip}.')
if primes is not None and len(primes) != num_dims:
raise ValueError(
'If passing in a sequence of primes it must be the same length as '
f'num_dims={num_dims}, received {primes} (len {len(primes)}).')
if shuffled_seed_sequence is not None:
if len(shuffled_seed_sequence) != num_dims:
raise ValueError(
'If passing in `shuffled_seed_sequence` it must be the same length '
f'as num_dims={num_dims}, received {shuffled_seed_sequence} '
f'(len {len(shuffled_seed_sequence)}).')
for d in range(num_dims):
if len(shuffled_seed_sequence[d]) != primes[d]:
        raise ValueError(
            'If passing in `shuffled_seed_sequence`, its element `{d}` must '
            'be a sequence of length `primes[{d}]`={expected}, received '
            '{actual} (len {length}).'.format(
                d=d,
                expected=primes[d],
                actual=shuffled_seed_sequence[d],
                length=len(shuffled_seed_sequence[d])))
if primes is None:
primes = []
prime_attempts = 1
while len(primes) < num_dims + 1:
primes = generate_primes(1000 * prime_attempts)
prime_attempts += 1
primes = primes[-num_dims - 1:-1]
# Skip the first `skip` points in the sequence because they can have unwanted
# correlations.
num_samples += skip
halton_sequence = []
for d in range(num_dims):
if shuffled_seed_sequence is None:
dim_shuffled_seed_sequence = None
else:
dim_shuffled_seed_sequence = shuffled_seed_sequence[d]
dim_sequence = _generate_dim(
num_samples=num_samples,
base=primes[d],
shuffled_seed_sequence=dim_shuffled_seed_sequence,
per_dim_shift=per_dim_shift)
dim_sequence = dim_sequence[skip:]
halton_sequence.append(dim_sequence)
# Transpose the 2-D list to be shape [num_samples, num_dims].
halton_sequence = list(zip(*halton_sequence))
# Shuffle the sequence.
if shuffle_sequence:
random.shuffle(halton_sequence)
return halton_sequence
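# Illustrative sketch (not part of the original module): drawing four
# quasi-random points in the two-dimensional unit square. Every coordinate lies
# in [0, 1), and the rows are shuffled before being returned.
def _example_generate_sequence() -> Matrix:
  return generate_sequence(num_samples=4, num_dims=2)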
def _generate_double_point(name: Text,
min_val: float,
max_val: float,
scaling: Text,
halton_point: float) -> Tuple[str, float]:
"""Generate a float hyperparameter value from a Halton sequence point."""
if scaling not in ['linear', 'log']:
raise ValueError(
'Only log or linear scaling is supported for floating point '
f'parameters. Received {scaling}.')
if scaling == 'log':
# To transform from [0, 1] to [min_val, max_val] on a log scale we do:
# min_val * exp(x * log(max_val / min_val)).
rescaled_value = (
min_val * math.exp(halton_point * math.log(max_val / min_val)))
else:
rescaled_value = halton_point * (max_val - min_val) + min_val
return name, rescaled_value
def _generate_discrete_point(name: str,
                             feasible_points: Sequence[Any],
                             halton_point: float) -> Tuple[str, Any]:
"""Generate a discrete hyperparameter value from a Halton sequence point."""
index = int(math.floor(halton_point * len(feasible_points)))
return name, feasible_points[index]
_DiscretePoints = collections.namedtuple('_DiscretePoints', 'feasible_points')
def discrete(feasible_points: Sequence[Any]) -> _DiscretePoints:
return _DiscretePoints(feasible_points)
def interval(start: int, end: int) -> Tuple[int, int]:
return start, end
def loguniform(name: Text, range_endpoints: Tuple[int, int]) -> _GeneratorFn:
min_val, max_val = range_endpoints
return functools.partial(_generate_double_point,
name,
min_val,
max_val,
'log')
def uniform(
name: Text, search_points: Union[_DiscretePoints,
Tuple[int, int]]) -> _GeneratorFn:
if isinstance(search_points, _DiscretePoints):
return functools.partial(_generate_discrete_point,
name,
search_points.feasible_points)
min_val, max_val = search_points
return functools.partial(_generate_double_point,
name,
min_val,
max_val,
'linear')
def product(sweeps: Sequence[_SweepSequence]) -> _SweepSequence:
"""Cartesian product of a list of hyperparameter generators."""
# A List[Dict] of hyperparameter names to sweep values.
hyperparameter_sweep = []
for hyperparameter_index in range(len(sweeps)):
hyperparameter_sweep.append([])
# Keep iterating until the iterator in sweep() ends.
sweep_i = sweeps[hyperparameter_index]
for point_index in range(len(sweep_i)):
hyperparameter_name, value = list(sweep_i[point_index].items())[0]
hyperparameter_sweep[-1].append((hyperparameter_name, value))
return list(map(dict, itertools.product(*hyperparameter_sweep)))
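# Illustrative sketch (not part of the original module): the Cartesian product
# of two discrete sweeps yields one dict per combination, e.g.
# [{'a': 1, 'b': 3}, {'a': 1, 'b': 4}, {'a': 2, 'b': 3}, {'a': 2, 'b': 4}].
def _example_product() -> _SweepSequence:
  return product([sweep('a', discrete([1, 2])), sweep('b', discrete([3, 4]))])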
def sweep(name: Text, feasible_points: _DiscretePoints) -> _SweepSequence:
  return [{name: x} for x in feasible_points.feasible_points]
def zipit(generator_fns_or_sweeps: Sequence[Union[_GeneratorFn,
_SweepSequence]],
length: int) -> _SweepSequence:
"""Zip together a list of hyperparameter generators.
Args:
generator_fns_or_sweeps: A sequence of either:
- Generator functions that accept a Halton sequence point and return a
        quasi-random sample, such as those returned by halton.uniform() or
halton.loguniform()
- Lists of dicts with one key/value such as those returned by
halton.sweep()
We need to support both of these (instead of having halton.sweep() return
a list of generator functions) so that halton.sweep() can be used directly
as a list.
length: the number of hyperparameter points to generate. If any of the
elements in generator_fns_or_sweeps are sweep lists, and their length is
less than `length`, the sweep generation will be terminated and will be
the same length as the shortest sweep sequence.
Returns:
A list of dictionaries, one for each trial, with a key for each unique
hyperparameter name from generator_fns_or_sweeps.
"""
halton_sequence = generate_sequence(
num_samples=length, num_dims=len(generator_fns_or_sweeps))
# A List[Dict] of hyperparameter names to sweep values.
hyperparameter_sweep = []
for trial_index in range(length):
hyperparameter_sweep.append({})
for hyperparameter_index in range(len(generator_fns_or_sweeps)):
halton_point = halton_sequence[trial_index][hyperparameter_index]
if callable(generator_fns_or_sweeps[hyperparameter_index]):
generator_fn = generator_fns_or_sweeps[hyperparameter_index]
hyperparameter_name, value = generator_fn(halton_point)
else:
sweep_list = generator_fns_or_sweeps[hyperparameter_index]
        if trial_index >= len(sweep_list):
break
hyperparameter_point = sweep_list[trial_index]
hyperparameter_name, value = list(hyperparameter_point.items())[0]
hyperparameter_sweep[trial_index][hyperparameter_name] = value
return hyperparameter_sweep
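# Illustrative sketch (not part of the original module): zipping a continuous
# log-scaled generator with a discrete sweep; the hyperparameter names below
# are arbitrary. Each returned dict looks like {'lr': 0.012, 'optimizer': 'adam'}.
def _example_zipit() -> _SweepSequence:
  return zipit(
      [loguniform('lr', interval(1e-4, 1e-1)),
       sweep('optimizer', discrete(['adam', 'sgd']))],
      length=2)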
_DictSearchSpace = Dict[str, Dict[str, Union[str, float, Sequence]]]
_ListSearchSpace = List[Dict[str, Union[str, float, Sequence]]]
def generate_search(search_space: Union[_DictSearchSpace, _ListSearchSpace],
num_trials: int) -> List[collections.namedtuple]:
"""Generate a random search with the given bounds and scaling.
  Args:
search_space: A dict where the keys are the hyperparameter names, and the
values are a dict of:
- {"min": x, "max": y, "scaling": z} where x and y are floats and z is
one of "linear" or "log"
- {"feasible_points": [...]} for discrete hyperparameters.
Alternatively, it can be a list of dict where keys are the hyperparameter
names, and the values are hyperparameters.
num_trials: the number of hyperparameter points to generate.
Returns:
A list of length `num_trials` of namedtuples, each of which has attributes
corresponding to the given hyperparameters, and values randomly sampled.
"""
if isinstance(search_space, dict):
all_hyperparameter_names = list(search_space.keys())
elif isinstance(search_space, list):
assert len(search_space) > 0
all_hyperparameter_names = list(search_space[0].keys())
else:
raise AttributeError('tuning_search_space should either be a dict or list.')
named_tuple_class = collections.namedtuple('Hyperparameters',
all_hyperparameter_names)
if isinstance(search_space, dict):
hyperparameter_generators = []
for name, space in search_space.items():
if 'feasible_points' in space: # Discrete search space.
generator_fn = uniform(name, discrete(space['feasible_points']))
else: # Continuous space.
if space['scaling'] == 'log':
generator_fn = loguniform(name, interval(space['min'], space['max']))
else:
generator_fn = uniform(name, interval(space['min'], space['max']))
hyperparameter_generators.append(generator_fn)
return [
named_tuple_class(**p)
for p in zipit(hyperparameter_generators, num_trials)
]
else:
hyperparameters = []
updated_num_trials = min(num_trials, len(search_space))
if num_trials != len(search_space):
logging.info(f'--num_tuning_trials was set to {num_trials}, but '
f'{len(search_space)} trial(s) found in the JSON file. '
f'Updating --num_tuning_trials to {updated_num_trials}.')
for trial in search_space:
hyperparameters.append(named_tuple_class(**trial))
return hyperparameters[:updated_num_trials]
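# Illustrative sketch (not part of the original module): a dict-style search
# space mixing a log-scaled float, a linearly scaled float and a discrete
# hyperparameter. The names and bounds below are made up for illustration.
def _example_generate_search() -> List[collections.namedtuple]:
  search_space = {
      'learning_rate': {'min': 1e-4, 'max': 1e-1, 'scaling': 'log'},
      'dropout_rate': {'min': 0.0, 'max': 0.5, 'scaling': 'linear'},
      'batch_size': {'feasible_points': [64, 128, 256]},
  }
  # Returns 5 namedtuples with attributes learning_rate, dropout_rate and
  # batch_size, sampled from a 3-dimensional Halton sequence.
  return generate_search(search_space, num_trials=5)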
|
"""Utilities for data processing."""
from typing import Dict, Iterable, Optional, Tuple
import jax
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler
from torch.utils.data import Sampler
from algorithmic_efficiency import spec
def shard_and_maybe_pad_np(
batch: Dict[str, spec.Tensor],
padding_value: int = 0,
global_batch_size: Optional[int] = None) -> Dict[str, spec.Tensor]:
"""Prepare tf data for JAX or PyTorch DDP.
Convert an input batch from tf Tensors to numpy arrays, pad it with
padding_value if the batch size is not divisible by the number of devices,
create the corresponding mask, and reshape it to be sharded across devices.
"""
local_device_count = max(torch.cuda.device_count(), jax.local_device_count())
inputs = batch['inputs']
current_batch_size = inputs[0].shape[0] if isinstance(
inputs, tuple) else inputs.shape[0]
remainder_size = current_batch_size % local_device_count
if remainder_size != 0:
if global_batch_size is not None:
pad_size = global_batch_size - current_batch_size
else:
pad_size = local_device_count - remainder_size
targets = batch['targets']
targets_shape = tuple(
targets[0].shape if isinstance(targets, tuple) else targets.shape)
# We need a 2d mask for WMT.
mask_shape = targets_shape if len(targets_shape) < 3 else targets_shape[0]
# Get weights from batch if there are any.
weights = batch.get('weights')
# The weights will also be padded.
batch['weights'] = np.ones(mask_shape) if weights is None else weights
def _prepare(x):
# Use _numpy() for zero-copy conversion between TF and NumPy.
if not isinstance(x, np.ndarray):
x = x._numpy() # pylint: disable=protected-access
# Pad if remainder_size != 0 (should only be possible during evaluation).
if remainder_size != 0:
x = pad(x, pad_size, 'jax', padding_value=padding_value)
# Reshape (global_batch_size, ...) to
# (local_device_count, per_device_batch_size, ...).
# Assumes that `global_batch_size % local_device_count == 0`.
return x.reshape((local_device_count, -1, *x.shape[1:]))
return jax.tree_map(_prepare, batch)
def pad(tensor: spec.Tensor,
pad_size: int,
framework: str,
padding_value: int = 0) -> spec.Tensor:
if len(tensor) > 1:
pad_size = (pad_size, *tensor.shape[1:])
if framework == 'pytorch':
padding = torch.full(
pad_size, padding_value, dtype=tensor.dtype, device=tensor.device)
padded_tensor = torch.cat((tensor, padding), dim=0)
elif framework == 'jax':
padding = np.full(pad_size, padding_value, dtype=tensor.dtype)
padded_tensor = np.concatenate((tensor, padding), axis=0)
else:
raise ValueError(f'Framework has to be pytorch or jax, but is {framework}.')
return padded_tensor
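# Illustrative sketch (not part of the original module): padding a numpy batch
# of three rows with one extra all-zero row, e.g. so that the result can be
# split evenly across devices.
def _example_pad() -> np.ndarray:
  batch = np.ones((3, 5))
  return pad(batch, pad_size=1, framework='jax', padding_value=0)  # (4, 5)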
def mixup_pytorch(batch: Tuple[spec.Tensor, spec.Tensor],
alpha: float = 0.2) -> Tuple[spec.Tensor, spec.Tensor]:
inputs, targets = batch
# Transform to one-hot targets.
targets = F.one_hot(targets, num_classes=1000)
# Compute weight for convex combination by sampling from Beta distribution.
beta_dist = torch.distributions.beta.Beta(alpha, alpha)
weight = beta_dist.sample()
# Return convex combination of original and shifted inputs and targets.
inputs = weight * inputs + (1.0 - weight) * torch.roll(inputs, 1, dims=0)
targets = weight * targets + (1.0 - weight) * torch.roll(targets, 1, dims=0)
return (inputs, targets)
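# Illustrative sketch (not part of the original module): mixup on a toy batch.
# Note that the helper above hard-codes 1000 classes (ImageNet-style), so the
# random integer targets here are drawn from [0, 1000).
def _example_mixup_pytorch() -> Tuple[spec.Tensor, spec.Tensor]:
  inputs = torch.randn(4, 3, 8, 8)
  targets = torch.randint(0, 1000, (4,))
  return mixup_pytorch((inputs, targets), alpha=0.2)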
# github.com/SeungjunNah/DeepDeblur-PyTorch/blob/master/src/data/sampler.py
class DistributedEvalSampler(Sampler):
r"""DistributedEvalSampler is different from DistributedSampler.
It does NOT add extra samples to make it evenly divisible.
DistributedEvalSampler should NOT be used for training. The distributed
processes could hang forever.
See this issue for details: https://github.com/pytorch/pytorch/issues/22584
  Shuffling is disabled by default.
  DistributedEvalSampler is intended for evaluation, where synchronization does
  not happen every epoch.
Synchronization should be done outside the dataloader loop.
Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
  process can pass a :class:`~DistributedEvalSampler` instance as
a :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the
original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (int, optional): Number of processes participating in
distributed training. By default, :attr:`rank` is retrieved from the
current distributed group.
rank (int, optional): Rank of the current process within
:attr:`num_replicas`. By default, :attr:`rank` is retrieved from the
current distributed group.
shuffle (bool, optional): If ``True``, sampler will shuffle the
indices. Default: ``False``
seed (int, optional): random seed used to shuffle the sampler if
:attr:`shuffle=True`. This number should be identical across all
processes in the distributed group. Default: ``0``.
.. warning::
    In distributed mode, calling the :meth:`set_epoch(epoch) <set_epoch>`
method at the beginning of each epoch **before** creating the
:class:`DataLoader` iterator is necessary to make shuffling work
properly across multiple epochs. Otherwise, the same ordering will be
always used.
Example::
>>> sampler = DistributedSampler(dataset) if is_distributed else None
>>> loader = DataLoader(dataset, shuffle=(sampler is None),
... sampler=sampler)
>>> for epoch in range(start_epoch, n_epochs):
... if is_distributed:
... sampler.set_epoch(epoch)
... train(loader)
"""
def __init__(self,
dataset: torch.utils.data.Dataset,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = False,
seed: int = 0) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError('Requires distributed package to be available.')
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError('Requires distributed package to be available.')
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
# true value without extra samples
self.total_size = len(self.dataset)
indices = list(range(self.total_size))
indices = indices[self.rank:self.total_size:self.num_replicas]
# true value without extra samples
self.num_samples = len(indices)
self.shuffle = shuffle
self.seed = seed
def __iter__(self) -> Iterable[int]:
if self.shuffle:
# Deterministically shuffle based on epoch and seed.
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# Subsample.
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
r"""Sets the epoch for this sampler. When :attr:`shuffle=True`, this
ensures all replicas use a different random ordering for each epoch.
Otherwise, the next iteration of this sampler will yield the same
ordering.
Args:
epoch: An int indicating epoch number.
"""
self.epoch = epoch
# Modified from github.com/pytorch/pytorch/issues/23900#issuecomment-518858050.
def cycle(iterable: Iterable,
keys: Tuple[str, ...] = ('inputs', 'targets'),
custom_sampler: bool = False,
use_mixup: bool = False,
mixup_alpha: float = 0.2) -> Iterable:
iterator = iter(iterable)
epoch = 0
while True:
try:
batch = next(iterator)
if use_mixup:
assert keys == ('inputs', 'targets')
batch = mixup_pytorch(batch, alpha=mixup_alpha)
assert len(keys) == len(batch)
yield dict(zip(keys, batch))
except StopIteration:
if custom_sampler and isinstance(iterable, DataLoader):
epoch += 1
iterable.sampler.set_epoch(epoch)
iterator = iter(iterable)
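# Illustrative sketch (not part of the original module): wrapping a DataLoader
# in `cycle` to obtain an endless stream of {'inputs': ..., 'targets': ...}
# dicts without manually restarting the iterator at every epoch boundary.
def _example_cycle() -> Dict[str, spec.Tensor]:
  dataset = torch.utils.data.TensorDataset(
      torch.randn(8, 3), torch.randint(0, 2, (8,)))
  loader = DataLoader(dataset, batch_size=4)
  return next(cycle(loader))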
# Inspired by
# github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Classification/
# ConvNets/image_classification/dataloaders.py
class PrefetchedWrapper:
def __init__(self,
dataloader: torch.utils.data.DataLoader,
device: torch.device,
start_epoch: int = 0) -> None:
self.dataloader = dataloader
self.epoch = start_epoch
self.device = device
def __len__(self) -> int:
return len(self.dataloader)
def __iter__(self) -> Iterable[Tuple[spec.Tensor, spec.Tensor]]:
if isinstance(self.dataloader.sampler, DistributedSampler):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return self.prefetched_loader()
def prefetched_loader(self) -> Iterable[Tuple[spec.Tensor, spec.Tensor]]:
stream = torch.cuda.Stream()
first = True
for next_inputs, next_targets in self.dataloader:
with torch.cuda.stream(stream):
next_inputs = next_inputs.to(
self.device, dtype=torch.float, non_blocking=True)
next_targets = next_targets.to(self.device, non_blocking=True)
if not first:
yield inputs, targets
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
inputs = next_inputs
targets = next_targets
yield inputs, targets
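# Illustrative sketch (not part of the original module): wrapping a DataLoader
# so host-to-device copies overlap with computation on a side CUDA stream. This
# requires a CUDA device, since the prefetching relies on torch.cuda.Stream.
def _example_prefetched_wrapper(loader: DataLoader) -> None:
  wrapper = PrefetchedWrapper(loader, device=torch.device('cuda:0'))
  for inputs, targets in wrapper:
    # `inputs` and `targets` are already on the GPU at this point.
    del inputs, targets
    break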
|
"""Utilities for dealing with parameter-related logic like types and shapes."""
from typing import Dict
import flax
import jax
from torch import nn
from algorithmic_efficiency import spec
def pytorch_param_shapes(model: nn.Module) -> Dict[str, spec.ShapeTuple]:
return {k: spec.ShapeTuple(v.shape) for k, v in model.named_parameters()}
def pytorch_param_types(
param_shapes: Dict[str, spec.ShapeTuple]) -> Dict[str, spec.ParameterType]:
param_types = {}
for name in param_shapes.keys():
if 'bn' in name:
if 'weight' in name or 'scale' in name:
param_types[name] = spec.ParameterType.BATCH_NORM_SCALE
elif 'bias' in name:
param_types[name] = spec.ParameterType.BATCH_NORM_BIAS
else:
raise ValueError(f'Unrecognized batch norm parameter: {name}.')
elif 'norm' in name or 'ln' in name:
if 'weight' in name or 'scale' in name:
param_types[name] = spec.ParameterType.LAYER_NORM_SCALE
elif 'bias' in name:
param_types[name] = spec.ParameterType.LAYER_NORM_BIAS
else:
raise ValueError(f'Unrecognized layer norm parameter: {name}.')
elif 'conv' in name:
if 'bias' in name:
param_types[name] = spec.ParameterType.BIAS
else:
param_types[name] = spec.ParameterType.CONV_WEIGHT
elif ('embedding' in name or 'embed' in name) and 'weight' in name:
param_types[name] = spec.ParameterType.EMBEDDING
elif 'attn' in name or 'attention' in name:
if 'bias' in name:
param_types[name] = spec.ParameterType.ATTENTION_BIAS
elif 'k_proj' in name or 'key' in name:
param_types[name] = spec.ParameterType.ATTENTION_K
elif 'q_proj' in name or 'query' in name:
param_types[name] = spec.ParameterType.ATTENTION_Q
elif 'v_proj' in name or 'value' in name:
param_types[name] = spec.ParameterType.ATTENTION_V
elif 'out' in name and 'weight' in name:
param_types[name] = spec.ParameterType.ATTENTION_OUT
elif 'scale' in name:
param_types[name] = spec.ParameterType.WEIGHT
elif 'in_proj_weight' in name:
param_types[name] = spec.ParameterType.ATTENTION_QKV
else:
raise ValueError(f'Unrecognized attention parameter: {name}.')
elif 'bias' in name:
param_types[name] = spec.ParameterType.BIAS
else:
param_types[name] = spec.ParameterType.WEIGHT
return param_types
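# Illustrative sketch (not part of the original module): the heuristics above
# key off substrings of parameter names, so module attribute names matter.
def _example_pytorch_param_types() -> Dict[str, spec.ParameterType]:
  class _ToyModel(nn.Module):

    def __init__(self) -> None:
      super().__init__()
      self.fc = nn.Linear(4, 4)
      self.ln = nn.LayerNorm(4)

  # Expected: {'fc.weight': WEIGHT, 'fc.bias': BIAS,
  #            'ln.weight': LAYER_NORM_SCALE, 'ln.bias': LAYER_NORM_BIAS}.
  return pytorch_param_types(pytorch_param_shapes(_ToyModel()))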
def jax_param_shapes(
params: spec.ParameterContainer) -> spec.ParameterShapeTree:
return jax.tree_map(lambda x: spec.ShapeTuple(x.shape), params)
def jax_param_types(param_shapes: spec.ParameterShapeTree,
parent_name: str = '') -> Dict[str, spec.ParameterType]:
param_types = {}
for name, value in param_shapes.items():
name = name.lower()
if isinstance(value, dict) or isinstance(value, flax.core.FrozenDict):
param_types[name] = jax_param_types(
value, parent_name=parent_name + '/' + name)
else:
if 'batchnorm' in parent_name or 'bn' in parent_name:
if name == 'scale':
param_types[name] = spec.ParameterType.BATCH_NORM_SCALE
elif name == 'bias':
param_types[name] = spec.ParameterType.BATCH_NORM_BIAS
else:
raise ValueError(
f'Unrecognized batch norm parameter: {parent_name}/{name}.')
elif 'layernorm' in parent_name or 'ln' in parent_name:
if name == 'scale':
param_types[name] = spec.ParameterType.LAYER_NORM_SCALE
elif name == 'bias':
param_types[name] = spec.ParameterType.LAYER_NORM_BIAS
else:
raise ValueError(
f'Unrecognized layer norm parameter: {parent_name}/{name}.')
elif 'conv' in parent_name:
if 'bias' in name:
param_types[name] = spec.ParameterType.BIAS
else:
param_types[name] = spec.ParameterType.CONV_WEIGHT
# Note that this is exact equality, not contained in, because
# flax.linen.Embed names the embedding parameter "embedding"
# https://github.com/google/flax/blob/main/flax/linen/linear.py#L604.
elif ('embedding' in name or
('embedding' in parent_name and name == 'kernel')):
param_types[name] = spec.ParameterType.EMBEDDING
elif 'attention' in parent_name:
if name == 'bias':
param_types[name] = spec.ParameterType.ATTENTION_BIAS
elif 'key' in parent_name and name == 'kernel':
param_types[name] = spec.ParameterType.ATTENTION_K
elif 'query' in parent_name and name == 'kernel':
param_types[name] = spec.ParameterType.ATTENTION_Q
elif 'value' in parent_name and name == 'kernel':
param_types[name] = spec.ParameterType.ATTENTION_V
elif 'out' in parent_name and name == 'kernel':
param_types[name] = spec.ParameterType.ATTENTION_OUT
elif 'scale' in name:
param_types[name] = spec.ParameterType.WEIGHT
elif 'in_proj_weight' in name:
param_types[name] = spec.ParameterType.ATTENTION_QKV
else:
raise ValueError(
f'Unrecognized attention parameter: {parent_name}/{name}.')
elif 'bias' in name:
param_types[name] = spec.ParameterType.BIAS
else:
param_types[name] = spec.ParameterType.WEIGHT
return param_types
|
"""Utilities for checkpointing.
Note: Code adapted from
https://github.com/google/init2winit/blob/master/init2winit/checkpoint.py.
"""
import os
from typing import Sequence, Tuple
from absl import logging
from flax import jax_utils
from flax.training import checkpoints as flax_checkpoints
from flax.training.checkpoints import latest_checkpoint
import jax
import numpy as np
from tensorflow.io import gfile # pytype: disable=import-error
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
_, _, DEVICE, _ = pytorch_setup()
CheckpointReturn = Tuple[spec.OptimizerState,
spec.ParameterContainer,
spec.ModelAuxiliaryState,
dict,
list,
int,
int]
def maybe_restore_checkpoint(framework: str,
optimizer_state: spec.OptimizerState,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
train_state: dict,
eval_results: list,
global_step: int,
preemption_count: int,
checkpoint_dir: str) -> CheckpointReturn:
"""Optionally restores from a checkpoint.
The checkpoint logic is as follows: if there is a checkpoint in
`checkpoint_dir`, restore it. Else, don't restore any checkpoint, and
just return the passed-in optimizer_state, model_params,
model_state, and train_state.
Args:
framework: Current framework (e.g., `jax` or `pytorch`).
optimizer_state: Optimizer state.
model_params: Model parameters.
model_state: Model state such as batch statistics when batch
normalization is used.
train_state: Training state such as `last_eval_time`.
eval_results: Previous evaluation results.
global_step: Global step.
preemption_count: Number of preemptions.
checkpoint_dir: The training directory where we will look for a checkpoint.
Returns:
A tuple of (optimizer_state, model_params, model_state,
train_state, eval_results, global_step, preemption_count).
"""
if framework == 'jax':
opt_state, opt_update_fn = optimizer_state
else:
opt_state, opt_update_fn = optimizer_state, None
uninitialized_global_step = -1
uninitialized_preemption_count = -1
checkpoint_state = {
'model_params': model_params,
'optimizer_state': opt_state,
'model_state': model_state,
'train_state': train_state,
'eval_results': None,
'global_step': uninitialized_global_step,
'preemption_count': uninitialized_preemption_count,
}
if framework == 'jax':
latest_ckpt = flax_checkpoints.restore_checkpoint(
checkpoint_dir, target=checkpoint_state)
save_path = os.path.join(checkpoint_dir,
'checkpoint_' + str(latest_ckpt['global_step']))
else:
latest_ckpt = checkpoint_state
save_path = latest_checkpoint(checkpoint_dir)
if save_path is not None:
latest_ckpt = torch.load(save_path, map_location=DEVICE)
  # The restore calls above leave `latest_ckpt` equal to `checkpoint_state` if
  # checkpoint_dir does not exist or if it exists and contains no checkpoints.
found_checkpoint = latest_ckpt['global_step'] != uninitialized_global_step
if not found_checkpoint:
return (optimizer_state,
model_params,
model_state,
train_state,
eval_results,
global_step,
preemption_count)
  # A checkpoint was found in checkpoint_dir, so restore from it.
if framework == 'jax':
checkpoint_state = replicate_checkpoint(
latest_ckpt,
pytree_keys=[
'optimizer_state',
'model_params',
'model_state',
])
checkpoint_state['optimizer_state'] = (checkpoint_state['optimizer_state'],
opt_update_fn)
checkpoint_state['eval_results'] = [
(value, key) for key, value in latest_ckpt['eval_results'].items()
]
else:
checkpoint_state = latest_ckpt
if isinstance(model_params, torch.nn.DataParallel):
model_params = model_params.module
model_params.load_state_dict(checkpoint_state['model_params'])
checkpoint_state['model_params'] = model_params
for key in optimizer_state.keys():
optimizer_state[key].load_state_dict(
checkpoint_state['optimizer_state'][key])
checkpoint_state['optimizer_state'][key] = optimizer_state[key]
logging.info(f'Loaded checkpoint from {save_path}.')
return (checkpoint_state['optimizer_state'],
checkpoint_state['model_params'],
checkpoint_state['model_state'],
checkpoint_state['train_state'],
list(checkpoint_state['eval_results']),
checkpoint_state['global_step'],
checkpoint_state['preemption_count'] + 1)
def replicate_checkpoint(latest: dict,
pytree_keys: Sequence[str],
replicate: bool = True) -> dict:
"""Restores from the provided checkpoint.
Args:
latest: A dict representing the state of the
checkpoint we want to restore.
pytree_keys: A sequence of keys into `latest` that are pytrees, which will
be replicated if replicate=True.
replicate: If set, replicate the state across devices.
Returns:
A JAX pytree holding the arrays that need to be replicated/unreplicated.
"""
pytree = {k: latest[k] for k in pytree_keys}
if replicate:
pytree = jax_utils.replicate(pytree)
extra_dict = {k: latest[k] for k in latest.keys() if k not in pytree_keys}
pytree.update(extra_dict)
return pytree
def save_checkpoint(framework: str,
optimizer_state: spec.OptimizerState,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
train_state: dict,
eval_results: list,
global_step: int,
preemption_count: int,
checkpoint_dir: str,
save_intermediate_checkpoints: bool) -> None:
"""Save the checkpoint in `checkpoint_dir`.
Args:
framework: Current framework (e.g., `jax` or `pytorch`).
optimizer_state: Optimizer state.
model_params: Model parameters.
model_state: Model state such as batch statistics when batch
normalization is used.
train_state: Training state such as `last_eval_time`.
eval_results: Previous evaluation results.
global_step: Global step.
preemption_count: Number of preemptions.
checkpoint_dir: The training directory where we will look for a checkpoint.
save_intermediate_checkpoints: Whether to save intermediate checkpoints.
Returns:
A tuple of (optimizer_state, model_params, model_state,
train_state, eval_results, global_step, preemption_count).
"""
if framework == 'jax':
model_params = jax.device_get(jax_utils.unreplicate(model_params))
opt_state, _ = optimizer_state
opt_state = jax.device_get(jax_utils.unreplicate(opt_state))
model_state = jax.device_get(jax_utils.unreplicate(model_state))
else:
if isinstance(model_params, torch.nn.DataParallel):
model_params = model_params.module
model_params = model_params.state_dict()
optimizer_state_dict = {}
for key in optimizer_state.keys():
if hasattr(optimizer_state[key], 'state_dict'):
optimizer_state_dict[key] = optimizer_state[key].state_dict()
else:
logging.warning(
f'The optimizer state for key {key} is not saved, because '
f'{type(optimizer_state[key])} has not implemented a state_dict() '
'method.')
opt_state = optimizer_state_dict
checkpoint_state = {
'model_params': model_params,
'optimizer_state': opt_state,
'model_state': model_state,
'train_state': train_state,
'eval_results': tuple(eval_results),
'global_step': global_step,
'preemption_count': preemption_count,
}
save_path = os.path.join(checkpoint_dir, f'checkpoint_{global_step}')
if framework == 'jax':
flax_checkpoints.save_checkpoint(
checkpoint_dir,
target=checkpoint_state,
step=global_step,
overwrite=True,
keep=np.Inf if save_intermediate_checkpoints else 1)
else:
if not save_intermediate_checkpoints:
checkpoint_files = gfile.glob(
os.path.join(checkpoint_dir, 'checkpoint_*'))
for path in checkpoint_files:
logging.info('Removing checkpoint at %s', path)
gfile.rmtree(path)
torch.save(checkpoint_state, save_path)
logging.info(f'Saved checkpoint to {save_path}.')
|
"""Utilities for logging."""
import collections
import json
import logging
import os.path
import platform
import re
import shutil
import subprocess
import sys
from typing import Any, Optional
from absl import flags
from clu import metric_writers
import GPUtil
import pandas as pd
import psutil
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP, RANK, DEVICE, _ = pytorch_setup()
try:
import wandb # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.exception('Unable to import wandb.')
wandb = None
def makedir(dir_name: str, exist_ok: bool = True) -> None:
if RANK == 0:
# Only one worker should create the required dir.
os.makedirs(name=dir_name, exist_ok=exist_ok)
def get_log_dir(
experiment_dir: str,
workload: spec.Workload,
framework: str,
experiment_name: str,
resume_last_run: bool,
overwrite: bool,
) -> Optional[str]:
if RANK != 0:
return
# Construct path to experiment workload directory.
experiment_dir = os.path.expanduser(experiment_dir)
workload_dir_name = f'{workload}_{framework}'
if experiment_name is None:
experiment_path = os.path.join(experiment_dir, workload_dir_name)
else:
experiment_path = os.path.join(experiment_dir,
experiment_name,
workload_dir_name)
if os.path.exists(experiment_path):
if overwrite:
logging.info(
f'Removing existing experiment directory {experiment_path} because '
'--overwrite was set.')
shutil.rmtree(experiment_path)
elif resume_last_run:
logging.info(
f'Resuming from experiment directory {experiment_path} because '
'--resume_last_run was set.')
else:
resume = input(
'Found existing experiment dir with the same name: {}. Do you wish '
'to resume training from this dir? [y/N]:'.format(experiment_path))
if resume.lower() != 'y':
sys.exit()
logging.info(f'Creating experiment directory at {experiment_path}.')
makedir(experiment_path)
return experiment_path
def write_hparams(hparams: spec.Hyperparameters,
tuning_dir: str) -> spec.Hyperparameters:
hparams_file_name = os.path.join(tuning_dir, 'hparams.json')
if os.path.exists(hparams_file_name):
# If hparams.json already exist, use the previously saved hyperparameters.
logging.info('Loading hparams from %s.', hparams_file_name)
with open(hparams_file_name, 'r') as f:
hparams_dict = json.load(f)
hparams = collections.namedtuple('Hyperparameters',
hparams_dict)(**hparams_dict)
else:
logging.info('Saving hparams to %s.', hparams_file_name)
if RANK == 0:
with open(hparams_file_name, 'w') as f:
f.write(json.dumps(hparams._asdict(), indent=2))
return hparams
def write_json(name: str, log_dict: dict, indent: int = 2) -> None:
if RANK == 0:
with open(name, 'w') as f:
f.write(json.dumps(log_dict, indent=indent))
def write_to_csv(
metrics: dict,
csv_path: str,
) -> None:
try:
with open(csv_path, 'r') as csv_file:
measurements = pd.read_csv(csv_file)
measurements = pd.concat([measurements, pd.DataFrame([metrics])])
except (pd.errors.EmptyDataError, FileNotFoundError) as e:
measurements = pd.DataFrame([metrics], columns=sorted(metrics.keys()))
if isinstance(e, pd.errors.EmptyDataError):
      logging.info('Measurements file is empty. Creating a new one, starting '
                   'with metrics from this step.')
with open(csv_path, 'w') as csv_file:
measurements.to_csv(csv_file, index=False)
return
def _get_utilization() -> dict:
util_data = {}
# CPU
util_data['cpu.util.avg_percent_since_last'] = psutil.cpu_percent(
interval=None) # non-blocking (cpu util percentage since last call)
util_data['cpu.freq.current'] = psutil.cpu_freq().current
# Memory
memory_util = psutil.virtual_memory()
util_data['mem.total'] = memory_util.total
util_data['mem.available'] = memory_util.available
util_data['mem.used'] = memory_util.used
util_data['mem.percent_used'] = memory_util.percent
# Disk
disk_io_counters = psutil.disk_io_counters()
  util_data['disk.read_bytes_since_boot'] = disk_io_counters.read_bytes
  util_data['disk.write_bytes_since_boot'] = disk_io_counters.write_bytes
# Network
net_io_counters = psutil.net_io_counters()
util_data['net.bytes_sent_since_boot'] = net_io_counters.bytes_sent
util_data['net.bytes_recv_since_boot'] = net_io_counters.bytes_recv
# GPU
gpus = GPUtil.getGPUs()
if gpus:
gpu_count = len(gpus)
util_data['gpu.count'] = gpu_count
avg_gpu_load = 0
avg_gpu_memory_util = 0
avg_gpu_memory_total = 0
avg_gpu_memory_used = 0
avg_gpu_memory_free = 0
avg_gpu_temperature = 0
for gpu in gpus:
idx = gpu.id
util_data[f'gpu.{idx}.compute.util'] = gpu.load
util_data[f'gpu.{idx}.mem.util'] = gpu.memoryUtil
util_data[f'gpu.{idx}.mem.total'] = gpu.memoryTotal
util_data[f'gpu.{idx}.mem.used'] = gpu.memoryUsed
util_data[f'gpu.{idx}.mem.free'] = gpu.memoryFree
util_data[f'gpu.{idx}.temp.current'] = gpu.temperature
avg_gpu_load += gpu.load
avg_gpu_memory_util += gpu.memoryUtil
avg_gpu_memory_total += gpu.memoryTotal
avg_gpu_memory_used += gpu.memoryUsed
avg_gpu_memory_free += gpu.memoryFree
avg_gpu_temperature += gpu.temperature
util_data['gpu.avg.compute.util'] = avg_gpu_load / gpu_count
util_data['gpu.avg.mem.util'] = avg_gpu_memory_util / gpu_count
util_data['gpu.avg.mem.total'] = avg_gpu_memory_total / gpu_count
util_data['gpu.avg.mem.used'] = avg_gpu_memory_used / gpu_count
util_data['gpu.avg.mem.free'] = avg_gpu_memory_free / gpu_count
util_data['gpu.avg.temp.current'] = avg_gpu_temperature / gpu_count
return util_data
def _get_system_hardware_info() -> dict:
system_hardware_info = {}
try:
system_hardware_info['cpu_model_name'] = _get_cpu_model_name()
system_hardware_info['cpu_count'] = psutil.cpu_count()
except: # pylint: disable=bare-except
logging.info('Unable to record cpu information. Continuing without it.')
gpus = GPUtil.getGPUs()
if gpus:
try:
system_hardware_info['gpu_model_name'] = gpus[0].name
system_hardware_info['gpu_count'] = len(gpus)
system_hardware_info['gpu_driver'] = gpus[0].driver
except: # pylint: disable=bare-except
logging.info('Unable to record gpu information. Continuing without it.')
return system_hardware_info
def _get_system_software_info() -> dict:
system_software_info = {}
system_software_info['os_platform'] = \
platform.platform() # Ex. 'Linux-5.4.48-x86_64-with-glibc2.29'
system_software_info['python_version'] = platform.python_version(
) # Ex. '3.8.10'
system_software_info['python_compiler'] = platform.python_compiler(
) # Ex. 'GCC 9.3.0'
# Note: do not store hostname as that may be sensitive
try:
system_software_info['git_branch'] = _get_git_branch()
system_software_info['git_commit_hash'] = _get_git_commit_hash()
# Note: do not store git repo url as it may be sensitive or contain a
# secret.
except: # pylint: disable=bare-except
logging.info('Unable to record git information. Continuing without it.')
return system_software_info
def _get_git_commit_hash() -> str:
return subprocess.check_output(['git', 'rev-parse',
'HEAD']).decode('ascii').strip()
def _get_git_branch() -> str:
return subprocess.check_output(['git', 'rev-parse', '--abbrev-ref',
'HEAD']).decode('ascii').strip()
def _get_cpu_model_name() -> str:
output = subprocess.check_output(['lscpu']).decode('ascii').strip()
return re.findall(r'(?=Model name:\s{1,}).*',
output)[0].split('Model name:')[1].strip()
def _is_primitive_type(item: Any) -> bool:
primitive = (float, int, str, bool)
return isinstance(item, primitive)
def _get_workload_properties(workload: spec.Workload) -> dict:
workload_properties = {}
skip_list = ['param_shapes', 'model_params_types']
keys = [
key for key in dir(workload)
if not key.startswith('_') and key not in skip_list
]
for key in keys:
    try:
      attr = getattr(workload, key)
    except:  # pylint: disable=bare-except
      logging.info(
          f'Unable to record workload.{key} information. Continuing without it.'
      )
      continue  # Skip this key; `attr` is undefined when getattr fails.
    if _is_primitive_type(attr):
workload_properties[f'workload.{key}'] = attr
return workload_properties
def get_meta_data(workload: spec.Workload) -> dict:
meta_data = {}
workload_properties = _get_workload_properties(workload)
meta_data.update(workload_properties)
utilization_measurements = _get_utilization()
meta_data.update(utilization_measurements)
system_software_info = _get_system_software_info()
meta_data.update(system_software_info)
system_hardware_info = _get_system_hardware_info()
meta_data.update(system_hardware_info)
return meta_data
class MetricLogger(object):
"""Used to log all measurements during training.
Note: Writes are not atomic, so files may become corrupted if preempted at
the wrong time.
"""
def __init__(self,
csv_path: str,
eval_csv_path: str,
events_dir: Optional[str] = None,
configs: Optional[flags.FLAGS] = None,
hyperparameters: Optional[spec.Hyperparameters] = None) -> None:
self._measurements = {}
self._csv_path = csv_path
self._eval_csv_path = eval_csv_path
self.use_wandb = configs.use_wandb
if events_dir:
self._tb_metric_writer = metric_writers.create_default_writer(events_dir)
if wandb is not None and self.use_wandb:
wandb.init(
dir=events_dir, tags=[flags.FLAGS.workload, flags.FLAGS.framework])
wandb.config.update(configs)
wandb.config.update(hyperparameters._asdict())
def append_scalar_metrics(self,
metrics: dict,
global_step: int,
preemption_count: Optional[int] = None,
is_eval: bool = False) -> None:
metrics['global_step'] = global_step
if preemption_count is not None:
metrics['preemption_count'] = preemption_count
write_to_csv(metrics, self._csv_path)
if is_eval:
write_to_csv(metrics, self._eval_csv_path)
if self._tb_metric_writer:
self._tb_metric_writer.write_scalars(
step=int(metrics['global_step']), scalars=metrics)
self._tb_metric_writer.flush()
if wandb is not None and self.use_wandb:
wandb.log(metrics)
def finish(self) -> None:
if wandb is not None and self.use_wandb:
wandb.finish()
def set_up_loggers(train_dir: str,
configs: flags.FLAGS,
hyperparameters: spec.Hyperparameters) -> MetricLogger:
csv_path = os.path.join(train_dir, 'measurements.csv')
eval_csv_path = os.path.join(train_dir, 'eval_measurements.csv')
metrics_logger = MetricLogger(
csv_path=csv_path,
eval_csv_path=eval_csv_path,
events_dir=train_dir,
configs=configs,
hyperparameters=hyperparameters)
return metrics_logger
|
"""Profiling code for Jax and PyTorch.
Modified from:
https://github.com/Lightning-AI/lightning/tree/master/src/pytorch_lightning/profilers.
"""
from collections import defaultdict
from contextlib import contextmanager
import os
import time
from typing import Dict, Generator, List, Optional, Tuple
import numpy as np
class Profiler:
def __init__(self, local_rank: Optional[int] = None) -> None:
self._local_rank = local_rank
self.current_actions: Dict[str, float] = {}
self.recorded_durations = defaultdict(list)
self.start_time = time.monotonic()
def set_local_rank(self, local_rank: int) -> None:
self._local_rank = local_rank
@property
def local_rank(self) -> int:
return 0 if self._local_rank is None else self._local_rank
def start(self, action_name: str) -> None:
    if self.local_rank != 0:
      # Only record timings on the (local) rank 0 process.
      return
if action_name in self.current_actions:
raise ValueError(
f'Attempted to start {action_name} which has already started.')
self.current_actions[action_name] = time.monotonic()
def stop(self, action_name: str) -> None:
    if self.local_rank != 0:
      return
end_time = time.monotonic()
if action_name not in self.current_actions:
raise ValueError(f'Attempting to stop recording an action '
f'({action_name}) which was never started.')
start_time = self.current_actions.pop(action_name)
duration = end_time - start_time
self.recorded_durations[action_name].append(duration)
@contextmanager
def profile(self, action_name: str) -> Generator:
try:
self.start(action_name)
yield action_name
finally:
self.stop(action_name)
def _make_report(
self
) -> Tuple[List[Tuple[str, float, float, int, float, float]], int, float]:
total_duration = time.monotonic() - self.start_time
report = [(str(a),
float(np.mean(d)),
float(np.std(d)),
len(d),
float(np.sum(d)),
100.0 * float(np.sum(d)) / total_duration) for a,
d in self.recorded_durations.items()]
report.sort(key=lambda x: x[5], reverse=True)
total_calls = sum(x[3] for x in report)
return report, total_calls, total_duration
def summary(self) -> str:
sep = os.linesep
output_string = ''
    output_string += f'Profiler Report:{sep}'
if len(self.recorded_durations) > 0:
max_key = max(len(k) for k in self.recorded_durations.keys())
def log_row(action, mean, std, num_calls, total, per):
row = f'{sep}| {action:<{max_key}s}\t| '
row += f'{mean:<15}\t| {std:<15}\t|'
row += f' {num_calls:<15}\t| {total:<15}\t| {per:<15}\t|'
return row
header_string = log_row('Action',
'Mean Duration (s)',
'Std Duration (s)',
'Num Calls',
'Total Time (s)',
'Percentage %')
output_string_len = len(header_string.expandtabs())
sep_lines = f'{sep}{"-" * output_string_len}'
output_string += sep_lines + header_string + sep_lines
report, total_calls, total_duration = self._make_report()
output_string += log_row('Total',
'-----',
'-----',
f'{total_calls:}',
f'{total_duration:.5}',
'100 %')
output_string += sep_lines
for action, mean_duration, std_duration, num_calls, \
total_duration, duration_per in report:
output_string += log_row(
action,
f'{mean_duration:.5}',
f'{std_duration:.5}',
f'{num_calls}',
f'{total_duration:.5}',
f'{duration_per:.5}',
)
output_string += sep_lines
output_string += sep
return output_string
class PassThroughProfiler(Profiler):
def start(self, action_name: str) -> None:
pass
def stop(self, action_name: str) -> None:
pass
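# Illustrative sketch (not part of the original module): timing a code block
# with the profiler's context manager and returning the aggregated report.
def _example_profiler() -> str:
  profiler = Profiler(local_rank=0)
  with profiler.profile('sleep'):
    time.sleep(0.01)
  return profiler.summary()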
|
""" Registry of workload info
"""
import importlib
import inspect
import os
from algorithmic_efficiency import spec
BASE_WORKLOADS_DIR = 'algorithmic_efficiency/workloads/'
WORKLOADS = {
'cifar': {
'workload_path': 'cifar/cifar', 'workload_class_name': 'CifarWorkload'
},
'criteo1tb': {
'workload_path': 'criteo1tb/criteo1tb',
'workload_class_name': 'Criteo1TbDlrmSmallWorkload',
},
'criteo1tb_test': {
'workload_path': 'criteo1tb/criteo1tb',
'workload_class_name': 'Criteo1TbDlrmSmallTestWorkload',
},
'fastmri': {
'workload_path': 'fastmri/fastmri',
'workload_class_name': 'FastMRIWorkload',
},
'imagenet_resnet': {
'workload_path': 'imagenet_resnet/imagenet',
'workload_class_name': 'ImagenetResNetWorkload',
},
'imagenet_vit': {
'workload_path': 'imagenet_vit/imagenet',
'workload_class_name': 'ImagenetVitWorkload',
},
'librispeech_conformer': {
'workload_path': 'librispeech_conformer/librispeech',
'workload_class_name': 'LibriSpeechConformerWorkload',
},
'librispeech_deepspeech': {
'workload_path': 'librispeech_deepspeech/librispeech',
'workload_class_name': 'LibriSpeechDeepSpeechWorkload',
},
'mnist': {
'workload_path': 'mnist/mnist', 'workload_class_name': 'MnistWorkload'
},
'ogbg': {
'workload_path': 'ogbg/ogbg', 'workload_class_name': 'OgbgWorkload'
},
'wmt': {'workload_path': 'wmt/wmt', 'workload_class_name': 'WmtWorkload'},
}
def convert_filepath_to_module(path: str):
base, extension = os.path.splitext(path)
if extension != '.py':
raise ValueError(f'Path: {path} must be a python file (*.py)')
return base.replace('/', '.')
def import_workload(workload_path: str,
workload_class_name: str,
return_class=False,
workload_init_kwargs=None) -> spec.Workload:
"""Import and add the workload to the registry.
This importlib loading is nice to have because it allows runners to avoid
installing the dependencies of all the supported frameworks. For example, if
a submitter only wants to write Jax code, the try/except below will catch
the import errors caused if they do not have the PyTorch dependencies
installed on their system.
Args:
workload_path: the path to the `workload.py` file to load.
workload_class_name: the name of the Workload class that implements the
`Workload` abstract class in `spec.py`.
return_class: if true, then the workload class is returned instead of the
      instantiated object. Useful for testing when methods need to be overridden.
workload_init_kwargs: kwargs to pass to the workload constructor.
"""
# Remove the trailing '.py' and convert the filepath to a Python module.
workload_path = convert_filepath_to_module(workload_path)
# Import the workload module.
workload_module = importlib.import_module(workload_path)
# Get everything defined in the workload module (including our class).
workload_module_members = inspect.getmembers(workload_module)
workload_class = None
for name, value in workload_module_members:
if name == workload_class_name:
workload_class = value
break
if workload_class is None:
raise ValueError(
f'Could not find member {workload_class_name} in {workload_path}. '
'Make sure the Workload class is spelled correctly and defined in '
'the top scope of the module.')
  if return_class:
    return workload_class
  return workload_class(**(workload_init_kwargs or {}))
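# Illustrative sketch (not part of the original module): resolving the MNIST
# workload from the registry. The '_pytorch' suffix and 'workload.py' filename
# below are assumptions about the on-disk layout of the workload packages.
def _example_import_workload() -> spec.Workload:
  entry = WORKLOADS['mnist']
  workload_path = os.path.join(BASE_WORKLOADS_DIR,
                               entry['workload_path'] + '_pytorch',
                               'workload.py')
  return import_workload(
      workload_path=workload_path,
      workload_class_name=entry['workload_class_name'],
      workload_init_kwargs={})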
|
"""MNIST workload parent class."""
import abc
import functools
import math
from typing import Any, Dict, Iterator, Optional
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
import torch
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
import algorithmic_efficiency.random_utils as prng
USE_PYTORCH_DDP, _, _, _ = pytorch_setup()
def _normalize(image: spec.Tensor, mean: float, stddev: float) -> spec.Tensor:
return (tf.cast(image, tf.float32) - mean) / stddev
def _build_mnist_dataset(
data_rng: jax.random.PRNGKey,
num_train_examples: int,
num_validation_examples: int,
train_mean: float,
train_stddev: float,
split: str,
data_dir: str,
global_batch_size: int,
cache: bool = False,
repeat_final_dataset: bool = True) -> Iterator[Dict[str, spec.Tensor]]:
shuffle = split in ['train', 'eval_train']
assert num_train_examples + num_validation_examples == 60000
if shuffle:
tfds_split = f'train[:{num_train_examples}]'
elif split == 'validation':
tfds_split = f'train[{num_train_examples}:]'
else:
tfds_split = 'test'
ds = tfds.load(
'mnist', split=tfds_split, shuffle_files=False, data_dir=data_dir)
ds = ds.map(
lambda x: {
'inputs': _normalize(x['image'], train_mean, train_stddev),
'targets': x['label'],
})
is_train = split == 'train'
if cache:
ds = ds.cache()
if shuffle:
ds = ds.repeat()
ds = ds.shuffle(16 * global_batch_size, seed=data_rng[0])
ds = ds.batch(global_batch_size, drop_remainder=is_train)
if repeat_final_dataset:
ds = ds.repeat()
ds = map(
functools.partial(
data_utils.shard_and_maybe_pad_np,
global_batch_size=global_batch_size),
ds)
return iter(ds)
class BaseMnistWorkload(spec.Workload):
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'accuracy'
def has_reached_validation_target(self, eval_result: Dict[str,
float]) -> bool:
return eval_result['validation/accuracy'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 0.97
def has_reached_test_target(self, eval_result: Dict[str, float]) -> bool:
return eval_result['test/accuracy'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 0.97
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
return 50000
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
return 10000
@property
def num_test_examples(self) -> int:
return 10000
@property
def eval_batch_size(self) -> int:
return 10000
@property
def train_mean(self) -> float:
return 0.1307
@property
def train_stddev(self) -> float:
return 0.3081
@property
def max_allowed_runtime_sec(self) -> int:
return 60
@property
def eval_period_time_sec(self) -> int:
return 10
@abc.abstractmethod
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del num_batches
ds = _build_mnist_dataset(
data_rng=data_rng,
num_train_examples=self.num_train_examples,
num_validation_examples=self.num_validation_examples,
train_mean=self.train_mean,
train_stddev=self.train_stddev,
split=split,
data_dir=data_dir,
global_batch_size=global_batch_size,
cache=cache,
repeat_final_dataset=repeat_final_dataset)
return ds
@property
def step_hint(self) -> int:
# Note that the target setting algorithms were not actually run on this
# workload, but for completeness we provide the number of steps for 10
    # epochs at batch size 64: ceil(50_000 * 10 / 64) = 7813 steps.
    return 7813
def _eval_model(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
      rng: spec.RandomState) -> Dict[str, spec.Tensor]:
raise NotImplementedError
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del global_step
data_rng, model_rng = prng.split(rng, 2)
if split not in self._eval_iters:
self._eval_iters[split] = self._build_input_queue(
data_rng=data_rng,
split=split,
data_dir=data_dir,
global_batch_size=global_batch_size,
cache=True,
repeat_final_dataset=True)
total_metrics = {
'accuracy': 0.,
'loss': 0.,
}
num_batches = int(math.ceil(num_examples / global_batch_size))
num_devices = max(torch.cuda.device_count(), jax.local_device_count())
for _ in range(num_batches):
batch = next(self._eval_iters[split])
per_device_model_rngs = prng.split(model_rng, num_devices)
batch_metrics = self._eval_model(params,
batch,
model_state,
per_device_model_rngs)
total_metrics = {
k: v + batch_metrics[k] for k, v in total_metrics.items()
}
return self._normalize_eval_metrics(num_examples, total_metrics)
|
"""MNIST workload implemented in Jax."""
import functools
from typing import Any, Dict, Optional, Tuple
from flax import jax_utils
from flax import linen as nn
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.mnist.workload import BaseMnistWorkload
class _Model(nn.Module):
@nn.compact
def __call__(self, x: spec.Tensor, train: bool) -> spec.Tensor:
del train
input_size = 28 * 28
num_hidden = 128
num_classes = 10
x = x.reshape((x.shape[0], input_size))
x = nn.Dense(features=num_hidden, use_bias=True)(x)
x = nn.sigmoid(x)
x = nn.Dense(features=num_classes, use_bias=True)(x)
return x
class MnistWorkload(BaseMnistWorkload):
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Dropout is unused."""
del dropout_rate
del aux_dropout_rate
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
self._model = _Model()
initial_params = self._model.init({'params': rng}, init_val,
train=True)['params']
self._param_shapes = param_utils.jax_param_shapes(initial_params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
return jax_utils.replicate(initial_params), None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_1'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
del update_batch_norm
train = mode == spec.ForwardPassMode.TRAIN
logits_batch = self._model.apply(
{'params': params},
augmented_and_preprocessed_input_batch['inputs'],
train=train)
return logits_batch, None
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
one_hot_targets = jax.nn.one_hot(label_batch, 10)
smoothed_targets = optax.smooth_labels(one_hot_targets, label_smoothing)
per_example_losses = -jnp.sum(
smoothed_targets * nn.log_softmax(logits_batch), axis=-1)
# `mask_batch` is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0, 0, None),
static_broadcasted_argnums=(0,))
def _eval_model(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
      rng: spec.RandomState) -> Dict[str, spec.Tensor]:
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
weights = batch.get('weights')
if weights is None:
weights = jnp.ones(len(logits))
accuracy = jnp.sum(
(jnp.argmax(logits, axis=-1) == batch['targets']) * weights)
summed_loss = self.loss_fn(batch['targets'], logits, weights)['summed']
metrics = {'accuracy': accuracy, 'loss': summed_loss}
metrics = lax.psum(metrics, axis_name='batch')
return metrics
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
return jax.tree_map(lambda x: float(x[0] / num_examples), total_metrics)
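# Illustrative sketch, not part of the workload above: how the dict returned by
# `loss_fn` can be reduced to a mean loss, e.g. inside a submission's
# `update_params`. The function name and the guard against an all-padding batch
# are assumptions for illustration only.
def _example_mean_loss(workload: MnistWorkload,
                       label_batch: spec.Tensor,
                       logits_batch: spec.Tensor,
                       mask_batch: Optional[spec.Tensor] = None) -> spec.Tensor:
  losses = workload.loss_fn(label_batch, logits_batch, mask_batch)
  # 'summed' is the total loss over valid examples and 'n_valid_examples'
  # counts the unmasked rows, so their ratio is the per-example mean loss.
  return losses['summed'] / jnp.maximum(losses['n_valid_examples'], 1)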
|
"""MNIST workload implemented in PyTorch."""
from collections import OrderedDict
import contextlib
from typing import Any, Dict, Iterator, Optional, Tuple
import torch
from torch import nn
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import init_utils
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
from algorithmic_efficiency.workloads.mnist.workload import BaseMnistWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
class _Model(nn.Module):
def __init__(self) -> None:
super().__init__()
input_size = 28 * 28
num_hidden = 128
num_classes = 10
self.net = nn.Sequential(
OrderedDict([('layer1',
torch.nn.Linear(input_size, num_hidden, bias=True)),
('layer1_sig', torch.nn.Sigmoid()),
('layer2',
torch.nn.Linear(num_hidden, num_classes, bias=True))]))
def reset_parameters(self) -> None:
for m in self.net.modules():
if isinstance(m, nn.Linear):
init_utils.pytorch_default_init(m)
def forward(self, x: spec.Tensor) -> spec.Tensor:
x = x.view(x.size()[0], -1)
return self.net(x)
class MnistWorkload(BaseMnistWorkload):
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del cache
if N_GPUS != 0:
per_device_batch_size = int(global_batch_size / N_GPUS)
else:
per_device_batch_size = int(global_batch_size)
# Only create and iterate over tf input pipeline in one Python process to
# avoid creating too many threads.
if RANK == 0:
      np_iter = super()._build_input_queue(
          data_rng,
          split,
          data_dir,
          global_batch_size,
          num_batches=num_batches,
          repeat_final_dataset=repeat_final_dataset)
while True:
if RANK == 0:
batch = next(np_iter) # pylint: disable=stop-iteration-return
inputs = torch.as_tensor(
batch['inputs'], dtype=torch.float32, device=DEVICE)
targets = torch.as_tensor(
batch['targets'], dtype=torch.long, device=DEVICE)
if 'weights' in batch:
weights = torch.as_tensor(
batch['weights'], dtype=torch.bool, device=DEVICE)
else:
          weights = torch.ones(batch['targets'].shape,
                               dtype=torch.bool,
                               device=DEVICE)
# Send batch to other devices when using DDP.
if USE_PYTORCH_DDP:
dist.broadcast(inputs, src=0)
inputs = inputs[0]
dist.broadcast(targets, src=0)
targets = targets[0]
dist.broadcast(weights, src=0)
weights = weights[0]
else:
inputs = inputs.view(-1, *inputs.shape[2:])
targets = targets.view(-1, *targets.shape[2:])
weights = weights.view(-1, *weights.shape[2:])
else:
inputs = torch.empty((N_GPUS, per_device_batch_size, 28, 28, 1),
dtype=torch.float32,
device=DEVICE)
dist.broadcast(inputs, src=0)
inputs = inputs[RANK]
targets = torch.empty((N_GPUS, per_device_batch_size),
dtype=torch.long,
device=DEVICE)
dist.broadcast(targets, src=0)
targets = targets[RANK]
weights = torch.empty((N_GPUS, per_device_batch_size),
dtype=torch.bool,
device=DEVICE)
dist.broadcast(weights, src=0)
weights = weights[RANK]
batch = {
'inputs': inputs.permute(0, 3, 1, 2),
'targets': targets,
'weights': weights,
}
yield batch
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Dropout is unused."""
del dropout_rate
del aux_dropout_rate
if hasattr(self, '_model'):
if isinstance(self._model, (DDP, torch.nn.DataParallel)):
self._model.module.reset_parameters()
else:
self._model.reset_parameters()
return self._model, None
torch.random.manual_seed(rng[0])
self._model = _Model()
self._param_shapes = param_utils.pytorch_param_shapes(self._model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
self._model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
self._model = DDP(self._model, device_ids=[RANK], output_device=RANK)
else:
self._model = torch.nn.DataParallel(self._model)
return self._model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
    return param_key in ['net.layer2.weight', 'net.layer2.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
del update_batch_norm
model = params
if mode == spec.ForwardPassMode.EVAL:
model.eval()
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits_batch = model(augmented_and_preprocessed_input_batch['inputs'])
return logits_batch, None
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
per_example_losses = F.cross_entropy(
logits_batch,
label_batch,
reduction='none',
label_smoothing=label_smoothing)
# `mask_batch` is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': torch.as_tensor(n_valid_examples, device=DEVICE),
'per_example': per_example_losses,
}
def _eval_model(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
      rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Return the mean accuracy and loss as a dict."""
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
targets = batch['targets']
weights = batch.get('weights')
if weights is None:
weights = torch.ones(len(logits), device=DEVICE)
_, predicted = torch.max(logits.data, 1)
# Number of correct predictions.
accuracy = ((predicted == targets) * weights).sum()
summed_loss = self.loss_fn(targets, logits, weights)['summed']
return {'accuracy': accuracy, 'loss': summed_loss}
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
if USE_PYTORCH_DDP:
for metric in total_metrics.values():
dist.all_reduce(metric)
return {k: float(v.item() / num_examples) for k, v in total_metrics.items()}
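# Illustrative sketch, not part of the workload above: the DDP input queue in
# `_build_input_queue` relies on rank 0 holding tensors of shape
# (N_GPUS, per_device_batch_size, ...) and every rank keeping only its own
# shard after the broadcast. With plain tensors (no process group) the
# reshape-and-slice step looks like this; the function name is hypothetical.
def _example_shard_for_rank(global_inputs: torch.Tensor,
                            n_gpus: int,
                            rank: int) -> torch.Tensor:
  per_device_batch_size = global_inputs.shape[0] // n_gpus
  sharded = global_inputs.view(n_gpus, per_device_batch_size,
                               *global_inputs.shape[1:])
  # In the real queue this slice happens after `dist.broadcast(..., src=0)`.
  return sharded[rank]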
|
from algorithmic_efficiency.workloads.librispeech_conformer import workload
class BaseDeepspeechLibrispeechWorkload(workload.BaseLibrispeechWorkload):
@property
def validation_target_value(self) -> float:
return 0.1162
@property
def test_target_value(self) -> float:
return 0.068093
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 80_000
@property
def max_allowed_runtime_sec(self) -> int:
return 92_509 # ~26 hours
|
r"""Deepspeech.
This model uses a deepspeech2 network to convert speech to text.
paper : https://arxiv.org/abs/1512.02595
# BiLSTM code contributed by bastings@
# github : https://github.com/bastings
# webpage : https://bastings.github.io/
"""
from typing import Any, Dict, List, Optional, Tuple, Union
from flax import linen as nn
from flax import struct
import jax
from jax.experimental import rnn
import jax.numpy as jnp
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
librispeech_preprocessor as preprocessor
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
spectrum_augmenter
Array = jnp.ndarray
StateType = Union[Array, Tuple[Array, ...]]
PRNGKey = Any
Shape = Tuple[int]
Dtype = Any
Carry = Any
CarryHistory = Any
Output = Any
@struct.dataclass
class DeepspeechConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int = 1024
dtype: Any = jnp.float32
encoder_dim: int = 512
num_lstm_layers: int = 6
num_ffn_layers: int = 3
conv_subsampling_factor: int = 2
conv_subsampling_layers: int = 2
use_specaug: bool = True
freq_mask_count: int = 2
freq_mask_max_bins: int = 27
time_mask_count: int = 10
time_mask_max_frames: int = 40
time_mask_max_ratio: float = 0.05
time_masks_per_frame: float = 0.0
use_dynamic_time_mask_max_frames: bool = True
batch_norm_momentum: float = 0.999
batch_norm_epsilon: float = 0.001
# If None, defaults to 0.1.
input_dropout_rate: Optional[float] = 0.1
# If None, defaults to 0.1.
feed_forward_dropout_rate: Optional[float] = 0.1
enable_residual_connections: bool = True
enable_decoder_layer_norm: bool = True
bidirectional: bool = True
class Subsample(nn.Module):
"""Module to perform strided convolution in order to subsample inputs.
Attributes:
    encoder_dim: model dimension of the deepspeech encoder.
input_dropout_rate: dropout rate for inputs.
"""
config: DeepspeechConfig
@nn.compact
def __call__(self, inputs, output_paddings, train):
config = self.config
outputs = jnp.expand_dims(inputs, axis=-1)
outputs, output_paddings = Conv2dSubsampling(
encoder_dim=config.encoder_dim,
dtype=config.dtype,
batch_norm_momentum=config.batch_norm_momentum,
batch_norm_epsilon=config.batch_norm_epsilon,
input_channels=1,
output_channels=config.encoder_dim)(outputs, output_paddings, train)
outputs, output_paddings = Conv2dSubsampling(
encoder_dim=config.encoder_dim,
dtype=config.dtype,
batch_norm_momentum=config.batch_norm_momentum,
batch_norm_epsilon=config.batch_norm_epsilon,
input_channels=config.encoder_dim,
output_channels=config.encoder_dim)(outputs, output_paddings, train)
batch_size, subsampled_lengths, subsampled_dims, channels = outputs.shape
outputs = jnp.reshape(
outputs, (batch_size, subsampled_lengths, subsampled_dims * channels))
outputs = nn.Dense(
config.encoder_dim,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
outputs)
if config.input_dropout_rate is None:
input_dropout_rate = 0.1
else:
input_dropout_rate = config.input_dropout_rate
outputs = nn.Dropout(
rate=input_dropout_rate, deterministic=not train)(
outputs)
return outputs, output_paddings
class Conv2dSubsampling(nn.Module):
"""Helper module used in Subsample layer.
1) Performs strided convolution over inputs and then applies non-linearity.
2) Also performs strided convolution over input_paddings to return the correct
paddings for downstream layers.
"""
input_channels: int = 0
output_channels: int = 0
  filter_stride: Tuple[int, int] = (2, 2)
padding: str = 'SAME'
encoder_dim: int = 0
dtype: Any = jnp.float32
batch_norm_momentum: float = 0.999
batch_norm_epsilon: float = 0.001
def setup(self):
self.filter_shape = (3, 3, self.input_channels, self.output_channels)
self.kernel = self.param('kernel',
nn.initializers.xavier_uniform(),
self.filter_shape)
self.bias = self.param(
'bias', lambda rng, s: jnp.zeros(s, jnp.float32), self.output_channels)
@nn.compact
def __call__(self, inputs, paddings, train):
# Computing strided convolution to subsample inputs.
feature_group_count = inputs.shape[3] // self.filter_shape[2]
outputs = jax.lax.conv_general_dilated(
lhs=inputs,
rhs=self.kernel,
window_strides=self.filter_stride,
padding=self.padding,
rhs_dilation=(1, 1),
dimension_numbers=('NHWC', 'HWIO', 'NHWC'),
feature_group_count=feature_group_count)
outputs += jnp.reshape(self.bias, (1,) * (outputs.ndim - 1) + (-1,))
outputs = nn.relu(outputs)
# Computing correct paddings post input convolution.
input_length = paddings.shape[1]
stride = self.filter_stride[0]
pad_len = (input_length + stride - 1) // stride * stride - input_length
out_padding = jax.lax.conv_general_dilated(
lhs=paddings[:, :, None],
rhs=jnp.ones([1, 1, 1]),
window_strides=self.filter_stride[:1],
padding=[(0, pad_len)],
dimension_numbers=('NHC', 'HIO', 'NHC'))
out_padding = jnp.squeeze(out_padding, axis=-1)
# Mask outputs by correct paddings to ensure padded elements in inputs map
# to padded value in outputs.
outputs = outputs * (1.0 -
jnp.expand_dims(jnp.expand_dims(out_padding, -1), -1))
return outputs, out_padding
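# Illustrative sketch, not part of the model above: the padding bookkeeping in
# `Conv2dSubsampling.__call__` shortens a length-`input_length` sequence to
# ceil(input_length / stride). A plain-Python restatement of that arithmetic;
# the function name is hypothetical.
def _example_subsampled_length(input_length: int, stride: int = 2) -> int:
  pad_len = (input_length + stride - 1) // stride * stride - input_length
  return (input_length + pad_len) // stride
# For example, _example_subsampled_length(17) == 9, and the two stride-2 layers
# stacked in `Subsample` shrink the time dimension by roughly a factor of 4.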
class FeedForwardModule(nn.Module):
"""Feedforward block of conformer layer."""
config: DeepspeechConfig
@nn.compact
def __call__(self, inputs, input_paddings=None, train=False):
padding_mask = jnp.expand_dims(1 - input_paddings, -1)
config = self.config
inputs = BatchNorm(config.encoder_dim,
config.dtype,
config.batch_norm_momentum,
config.batch_norm_epsilon)(inputs, input_paddings, train)
inputs = nn.Dense(
config.encoder_dim,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
inputs)
inputs = nn.relu(inputs)
inputs *= padding_mask
if config.feed_forward_dropout_rate is None:
feed_forward_dropout_rate = 0.1
else:
feed_forward_dropout_rate = config.feed_forward_dropout_rate
inputs = nn.Dropout(rate=feed_forward_dropout_rate)(
inputs, deterministic=not train)
return inputs
class LayerNorm(nn.Module):
"""Module implementing layer normalization.
  This implementation is the same as in this paper:
  https://arxiv.org/pdf/1607.06450.pdf.
  Note: we multiply normalized inputs by (1 + scale) and initialize scale to
  zeros; this differs from the default Flax implementation, which multiplies by
  scale and initializes it to ones.
"""
dim: int = 0
epsilon: float = 1e-6
def setup(self):
self.scale = self.param('scale', nn.initializers.zeros, [self.dim])
self.bias = self.param('bias', nn.initializers.zeros, [self.dim])
@nn.compact
def __call__(self, inputs):
mean = jnp.mean(inputs, axis=-1, keepdims=True)
var = jnp.mean(jnp.square(inputs - mean), axis=-1, keepdims=True)
normed_inputs = (inputs - mean) * jax.lax.rsqrt(var + self.epsilon)
normed_inputs *= (1 + self.scale)
normed_inputs += self.bias
return normed_inputs
class BatchNorm(nn.Module):
"""Implements batch norm respecting input paddings.
This implementation takes into account input padding by masking inputs before
computing mean and variance.
  This is inspired by the lingvo jax implementation of BatchNorm:
https://github.com/tensorflow/lingvo/blob/84b85514d7ad3652bc9720cb45acfab08604519b/lingvo/jax/layers/normalizations.py#L92
and the corresponding defaults for momentum and epsilon have been copied over
from lingvo.
"""
encoder_dim: int = 0
dtype: Any = jnp.float32
batch_norm_momentum: float = 0.999
batch_norm_epsilon: float = 0.001
def setup(self):
dim = self.encoder_dim
dtype = self.dtype
self.ra_mean = self.variable('batch_stats',
'mean',
lambda s: jnp.zeros(s, dtype),
dim)
self.ra_var = self.variable('batch_stats',
'var',
lambda s: jnp.ones(s, dtype),
dim)
self.gamma = self.param('scale', nn.initializers.zeros, dim, dtype)
self.beta = self.param('bias', nn.initializers.zeros, dim, dtype)
def _get_default_paddings(self, inputs):
"""Gets the default paddings for an input."""
in_shape = list(inputs.shape)
in_shape[-1] = 1
return jnp.zeros(in_shape, dtype=inputs.dtype)
@nn.compact
def __call__(self, inputs, input_paddings=None, train=False):
rank = inputs.ndim
reduce_over_dims = list(range(0, rank - 1))
if input_paddings is None:
padding = self._get_default_paddings(inputs)
else:
padding = jnp.expand_dims(input_paddings, -1)
momentum = self.batch_norm_momentum
epsilon = self.batch_norm_epsilon
if train:
mask = 1.0 - padding
sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=True)
count_v = jnp.sum(
jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=True)
sum_v = jax.lax.psum(sum_v, axis_name='batch')
count_v = jax.lax.psum(count_v, axis_name='batch')
count_v = jnp.maximum(count_v, 1.0)
mean = sum_v / count_v
variance = (inputs - mean) * (inputs - mean) * mask
sum_vv = jnp.sum(variance, axis=reduce_over_dims, keepdims=True)
sum_vv = jax.lax.psum(sum_vv, axis_name='batch')
var = sum_vv / count_v
self.ra_mean.value = momentum * self.ra_mean.value + (1 - momentum) * mean
self.ra_var.value = momentum * self.ra_var.value + (1 - momentum) * var
else:
mean = self.ra_mean.value
var = self.ra_var.value
inv = (1 + self.gamma) / jnp.sqrt(var + epsilon)
bn_output = (inputs - mean) * inv + self.beta
bn_output *= 1.0 - padding
return bn_output
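# Illustrative sketch, not part of the model above: the padded batch-norm
# statistics reduce to a masked mean/variance per feature. This restatement
# omits the cross-device `jax.lax.psum` that the real layer performs under
# `pmap`; the function name is hypothetical.
def _example_masked_moments(inputs: Array, paddings: Array):
  # inputs: [batch, time, dim]; paddings: [batch, time] with 1.0 at padded steps.
  mask = 1.0 - paddings[:, :, None]
  count = jnp.maximum(jnp.sum(mask, axis=(0, 1)), 1.0)
  mean = jnp.sum(inputs * mask, axis=(0, 1)) / count
  var = jnp.sum(jnp.square(inputs - mean) * mask, axis=(0, 1)) / count
  return mean, var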
class CudnnLSTM(nn.Module):
features: int
num_layers: int = 1
dropout_rate: float = 0.0
bidirectional: bool = False
@nn.compact
def __call__(
self,
inputs: Array,
segmentation_mask: Optional[Array] = None,
return_carry: Optional[bool] = None,
deterministic: bool = False,
initial_states: Optional[Tuple[Array, Array]] = None,
use_cuda: bool = True,
) -> Union[Array, Tuple[Array, Carry]]:
if jax.devices()[0].platform != 'gpu':
use_cuda = False
batch_size = inputs.shape[0]
input_size = inputs.shape[2]
num_directions = 2 if self.bidirectional else 1
dropout = 0.0 if deterministic else self.dropout_rate
weights = self.param(
'weights',
rnn.init_lstm_weight,
input_size,
self.features,
self.num_layers,
self.bidirectional,
)
if initial_states is None:
h_0 = jnp.zeros(
(num_directions * self.num_layers, batch_size, self.features),
jnp.float32,
)
c_0 = jnp.zeros(
(num_directions * self.num_layers, batch_size, self.features),
jnp.float32,
)
else:
h_0, c_0 = initial_states
if segmentation_mask is not None:
seq_lengths = jnp.sum(1 - segmentation_mask, axis=1, dtype=jnp.int32)
else:
seq_lengths = jnp.full((batch_size,), inputs.shape[1], dtype=jnp.int32)
if use_cuda:
y, h, c = rnn.lstm(
x=inputs, h_0=h_0, c_0=c_0, weights=weights,
seq_lengths=seq_lengths, input_size=input_size,
hidden_size=self.features, num_layers=self.num_layers,
dropout=dropout, bidirectional=self.bidirectional,
)
else:
weight_ih, weight_hh, bias_ih, bias_hh = self.unpack_weights(
weights, input_size)
y, h, c = rnn.lstm_ref(
x=inputs, h_0=h_0, c_0=c_0, W_ih=weight_ih, W_hh=weight_hh,
b_ih=bias_ih, b_hh=bias_hh, seq_lengths=seq_lengths,
input_size=input_size, hidden_size=self.features,
num_layers=self.num_layers, dropout=dropout,
bidirectional=self.bidirectional,
)
if return_carry:
return y, (h, c)
return y
@nn.nowrap
def unpack_weights(
self, weights: Array, input_size: int
) -> Tuple[
Dict[int, Array], Dict[int, Array], Dict[int, Array], Dict[int, Array]]:
return jax.experimental.rnn.unpack_lstm_weights(
weights,
input_size,
self.features,
self.num_layers,
self.bidirectional,
)
class BatchRNN(nn.Module):
"""Implements a single deepspeech encoder layer.
"""
config: DeepspeechConfig
@nn.compact
def __call__(self, inputs, input_paddings, train):
config = self.config
inputs = BatchNorm(config.encoder_dim,
config.dtype,
config.batch_norm_momentum,
config.batch_norm_epsilon)(inputs, input_paddings, train)
output = CudnnLSTM(
features=config.encoder_dim // 2,
bidirectional=config.bidirectional,
num_layers=1)(inputs, input_paddings)
return output
class Deepspeech(nn.Module):
"""Conformer (encoder + decoder) block.
Takes audio input signals and outputs probability distribution over vocab size
for each time step. The output is then fed into a CTC loss which eliminates
the need for alignment with targets.
"""
config: DeepspeechConfig
def setup(self):
config = self.config
self.specaug = spectrum_augmenter.SpecAug(
freq_mask_count=config.freq_mask_count,
freq_mask_max_bins=config.freq_mask_max_bins,
time_mask_count=config.time_mask_count,
time_mask_max_frames=config.time_mask_max_frames,
time_mask_max_ratio=config.time_mask_max_ratio,
time_masks_per_frame=config.time_masks_per_frame,
use_dynamic_time_mask_max_frames=config.use_dynamic_time_mask_max_frames
)
@nn.compact
def __call__(self, inputs, input_paddings, train):
config = self.config
outputs = inputs
output_paddings = input_paddings
# Compute normalized log mel spectrograms from input audio signal.
preprocessing_config = preprocessor.LibrispeechPreprocessingConfig()
outputs, output_paddings = preprocessor.MelFilterbankFrontend(
preprocessing_config,
per_bin_mean=preprocessor.LIBRISPEECH_MEAN_VECTOR,
per_bin_stddev=preprocessor.LIBRISPEECH_STD_VECTOR)(outputs,
output_paddings)
# Ablate random parts of input along temporal and frequency dimension
# following the specaug procedure in https://arxiv.org/abs/1904.08779.
if config.use_specaug and train:
outputs, output_paddings = self.specaug(outputs, output_paddings)
# Subsample input by a factor of 4 by performing strided convolutions.
outputs, output_paddings = Subsample(
config=config)(outputs, output_paddings, train)
# Run the lstm layers.
for _ in range(config.num_lstm_layers):
if config.enable_residual_connections:
outputs = outputs + BatchRNN(config)(outputs, output_paddings, train)
else:
outputs = BatchRNN(config)(outputs, output_paddings, train)
for _ in range(config.num_ffn_layers):
if config.enable_residual_connections:
outputs = outputs + FeedForwardModule(config=self.config)(
outputs, output_paddings, train)
else:
outputs = FeedForwardModule(config=self.config)(outputs,
output_paddings,
train)
# Run the decoder which in this case is a trivial projection layer.
if config.enable_decoder_layer_norm:
outputs = LayerNorm(config.encoder_dim)(outputs)
outputs = nn.Dense(
config.vocab_size,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
outputs)
return outputs, output_paddings
|
import functools
from typing import Optional
from flax import jax_utils
import jax
import jax.numpy as jnp
import numpy as np
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
LibriSpeechConformerWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax import \
models
class LibriSpeechDeepSpeechWorkload(LibriSpeechConformerWorkload):
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Deepspeech model init function.
Here we use dropout_rate as feed_forward_dropout_rate, and aux_dropout_rate
as input_dropout_rate.
"""
model_config = models.DeepspeechConfig(
feed_forward_dropout_rate=dropout_rate,
use_specaug=self.use_specaug,
input_dropout_rate=aux_dropout_rate)
self._model = models.Deepspeech(model_config)
input_shape = [(320000,), (320000,)]
fake_input_batch = [np.zeros((2, *x), jnp.float32) for x in input_shape]
model_init_fn = jax.jit(functools.partial(self._model.init, train=False))
params_rng, dropout_rng = jax.random.split(rng, 2)
variables = model_init_fn({'params': params_rng, 'dropout': dropout_rng},
*fake_input_batch)
model_state = variables['batch_stats']
params = variables['params']
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
model_state = jax_utils.replicate(model_state)
params = jax_utils.replicate(params)
return params, model_state
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_0'
|
"""This is a pytorch implementation mirroring:
https://github.com/google/init2winit/blob/master/init2winit/model_lib/conformer.py.
"""
from dataclasses import dataclass
import os
from typing import Optional, Tuple
import torch
from torch import nn
import torch.distributed.nn as dist_nn
import torch.nn.functional as F
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch import \
preprocessor
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.spectrum_augmenter import \
SpecAug
USE_PYTORCH_DDP = 'LOCAL_RANK' in os.environ
@dataclass
class DeepspeechConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int = 1024
encoder_dim: int = 512
num_lstm_layers: int = 6
num_ffn_layers: int = 3
conv_subsampling_factor: int = 2
conv_subsampling_layers: int = 2
use_specaug: bool = True
freq_mask_count: int = 2
freq_mask_max_bins: int = 27
time_mask_count: int = 10
time_mask_max_frames: int = 40
time_mask_max_ratio: float = 0.05
time_masks_per_frame: float = 0.0
use_dynamic_time_mask_max_frames: bool = True
batch_norm_momentum: float = 0.999
batch_norm_epsilon: float = 0.001
# If None, defaults to 0.1.
input_dropout_rate: Optional[float] = 0.1
# If None, defaults to 0.1.
feed_forward_dropout_rate: Optional[float] = 0.1
enable_residual_connections: bool = True
enable_decoder_layer_norm: bool = True
bidirectional: bool = True
class LayerNorm(nn.Module):
def __init__(self, dim, epsilon=1e-6):
super().__init__()
self.dim = dim
self.scale = nn.Parameter(torch.zeros(self.dim))
self.bias = nn.Parameter(torch.zeros(self.dim))
self.epsilon = epsilon
def forward(self, x):
mean = x.mean(dim=-1, keepdims=True)
var = x.var(dim=-1, unbiased=False, keepdims=True)
normed_x = (x - mean) * torch.rsqrt(var + self.epsilon)
normed_x *= (1 + self.scale)
normed_x += self.bias
return normed_x
class Subsample(nn.Module):
def __init__(self, config: DeepspeechConfig):
super().__init__()
encoder_dim = config.encoder_dim
self.encoder_dim = encoder_dim
self.conv1 = Conv2dSubsampling(
input_channels=1, output_channels=encoder_dim)
self.conv2 = Conv2dSubsampling(
input_channels=encoder_dim, output_channels=encoder_dim)
self.lin = nn.LazyLinear(out_features=self.encoder_dim, bias=True)
if config.input_dropout_rate is None:
input_dropout_rate = 0.1
else:
input_dropout_rate = config.input_dropout_rate
self.dropout = nn.Dropout(p=input_dropout_rate)
def forward(self, inputs, input_paddings):
output_paddings = input_paddings
outputs = inputs[:, None, :, :]
outputs, output_paddings = self.conv1(outputs, output_paddings)
outputs, output_paddings = self.conv2(outputs, output_paddings)
batch_size, channels, subsampled_lengths, subsampled_dims = outputs.shape
outputs = outputs.permute(0, 2, 3, 1).reshape(batch_size,
subsampled_lengths,
subsampled_dims * channels)
outputs = self.lin(outputs)
outputs = self.dropout(outputs)
return outputs, output_paddings
class Conv2dSubsampling(nn.Module):
def __init__(self,
input_channels: int,
output_channels: int,
               filter_stride: Tuple[int, int] = (2, 2),
padding: str = 'SAME',
batch_norm_momentum: float = 0.999,
batch_norm_epsilon: float = 0.001):
super().__init__()
self.input_channels = input_channels
self.output_channels = output_channels
self.filter_stride = filter_stride
self.padding = padding
self.filter_shape = (output_channels, input_channels, 3, 3)
self.kernel = nn.Parameter(
nn.init.xavier_uniform_(torch.empty(*self.filter_shape)))
self.bias = nn.Parameter(torch.zeros(output_channels))
def get_same_padding(self, input_shape):
in_height, in_width = input_shape[2:]
stride_height, stride_width = self.filter_stride
filter_height, filter_width = 3, 3
if in_height % stride_height == 0:
pad_along_height = max(filter_height - stride_height, 0)
else:
pad_along_height = max(filter_height - (in_height % stride_height), 0)
if in_width % stride_width == 0:
pad_along_width = max(filter_width - stride_width, 0)
else:
pad_along_width = max(filter_width - (in_width % stride_width), 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return (pad_left, pad_right, pad_top, pad_bottom)
def forward(self, inputs, paddings):
groups = inputs.shape[1] // self.input_channels
if self.padding == 'SAME':
in_ = F.pad(inputs, self.get_same_padding(inputs.shape))
else:
in_ = inputs
outputs = F.conv2d(
input=in_,
weight=self.kernel,
bias=self.bias,
stride=self.filter_stride,
dilation=(1, 1),
groups=groups)
outputs = F.relu(outputs)
input_length = paddings.shape[1]
stride = self.filter_stride[0]
pad_len = (input_length + stride - 1) // stride * stride - input_length
out_padding = F.conv1d(
input=torch.cat([
paddings[:, None, :],
torch.zeros(
size=(paddings.shape[0], 1, pad_len), device=paddings.device)
],
dim=2),
weight=torch.ones([1, 1, 1], device=paddings.device),
stride=self.filter_stride[:1])
out_padding = out_padding.squeeze(dim=1)
outputs = outputs * (1 - out_padding[:, None, :, None])
return outputs, out_padding
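# Illustrative sketch, not part of the model above: what `get_same_padding`
# computes per spatial dimension for the 3x3, stride-2 kernel, so that the
# output size matches TensorFlow-style 'SAME' padding, i.e. ceil(in_size /
# stride). The function name is hypothetical.
def _example_same_padding_1d(in_size: int,
                             stride: int = 2,
                             filter_size: int = 3) -> Tuple[int, int]:
  if in_size % stride == 0:
    pad = max(filter_size - stride, 0)
  else:
    pad = max(filter_size - (in_size % stride), 0)
  # The extra pixel (if any) goes to the bottom/right, matching the code above.
  return pad // 2, pad - pad // 2
# For example, _example_same_padding_1d(100) == (0, 1) and
# _example_same_padding_1d(101) == (1, 1).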
class FeedForwardModule(nn.Module):
def __init__(self, config: DeepspeechConfig):
super().__init__()
self.config = config
self.bn = BatchNorm(
dim=config.encoder_dim,
batch_norm_momentum=config.batch_norm_momentum,
batch_norm_epsilon=config.batch_norm_epsilon)
self.lin = nn.LazyLinear(out_features=config.encoder_dim, bias=True)
if config.feed_forward_dropout_rate is None:
feed_forward_dropout_rate = 0.1
else:
feed_forward_dropout_rate = config.feed_forward_dropout_rate
self.dropout = nn.Dropout(p=feed_forward_dropout_rate)
def forward(self, inputs, input_paddings):
padding_mask = (1 - input_paddings)[:, :, None]
inputs = self.bn(inputs, input_paddings)
inputs = self.lin(inputs)
inputs = F.relu(inputs)
inputs = inputs * padding_mask
inputs = self.dropout(inputs)
return inputs
class BatchNorm(nn.Module):
def __init__(self, dim, batch_norm_momentum, batch_norm_epsilon):
super().__init__()
running_mean = torch.zeros(dim)
running_var = torch.ones(dim)
self.register_buffer('running_mean', running_mean)
self.register_buffer('running_var', running_var)
self.weight = nn.Parameter(torch.zeros(dim))
self.bias = nn.Parameter(torch.zeros(dim))
self.momentum = batch_norm_momentum
self.epsilon = batch_norm_epsilon
self.dim = dim
def forward(self, inputs, input_paddings):
    # inputs: NHD
    # padding: NH
mask = 1 - input_paddings[:, :, None]
if self.training:
count = mask.sum()
masked_inp = inputs.masked_fill(mask == 0, 0)
sum_ = (masked_inp).sum(dim=(0, 1))
if USE_PYTORCH_DDP:
sum_ = dist_nn.all_reduce(sum_)
count = dist_nn.all_reduce(count)
mean = sum_ / count
sum_ = (torch.square(masked_inp - mean) * mask).sum(dim=(0, 1))
if USE_PYTORCH_DDP:
sum_ = dist_nn.all_reduce(sum_)
var = sum_ / count
self.running_mean = self.momentum * self.running_mean + (
1 - self.momentum) * mean.detach()
self.running_var = self.momentum * self.running_var + (
1 - self.momentum) * var.detach()
else:
mean = self.running_mean
var = self.running_var
v = (1 + self.weight) * torch.rsqrt(var + self.epsilon)
bn = (inputs - mean) * v + self.bias
output = bn.masked_fill(mask == 0, 0)
return output
class BatchRNN(nn.Module):
def __init__(self, config: DeepspeechConfig):
super().__init__()
self.config = config
hidden_size = config.encoder_dim
input_size = config.encoder_dim
bidirectional = config.bidirectional
self.bidirectional = bidirectional
self.bn = BatchNorm(config.encoder_dim,
config.batch_norm_momentum,
config.batch_norm_epsilon)
if bidirectional:
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size // 2,
bidirectional=True,
batch_first=True)
else:
self.lstm = nn.LSTM(
input_size=input_size, hidden_size=hidden_size, batch_first=True)
def forward(self, inputs, input_paddings):
inputs = self.bn(inputs, input_paddings)
lengths = torch.sum(1 - input_paddings, dim=1).detach().cpu()
packed_inputs = torch.nn.utils.rnn.pack_padded_sequence(
inputs, lengths, batch_first=True, enforce_sorted=False)
packed_outputs, _ = self.lstm(packed_inputs)
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(
packed_outputs, batch_first=True)
if outputs.shape[1] < inputs.shape[1]:
outputs = torch.cat([
outputs,
torch.zeros(
size=(outputs.shape[0],
inputs.shape[1] - outputs.shape[1],
outputs.shape[2]),
device=outputs.device)
],
dim=1)
return outputs
class DeepspeechEncoderDecoder(nn.Module):
def __init__(self, config: DeepspeechConfig):
super().__init__()
self.config = config
self.specaug = SpecAug(
freq_mask_count=config.freq_mask_count,
freq_mask_max_bins=config.freq_mask_max_bins,
time_mask_count=config.time_mask_count,
time_mask_max_frames=config.time_mask_max_frames,
time_mask_max_ratio=config.time_mask_max_ratio,
time_masks_per_frame=config.time_masks_per_frame,
use_dynamic_time_mask_max_frames=config.use_dynamic_time_mask_max_frames
)
preprocessing_config = preprocessor.PreprocessorConfig()
self.preprocessor = preprocessor.MelFilterbankFrontend(
preprocessing_config,
per_bin_mean=preprocessor.LIBRISPEECH_MEAN_VECTOR,
per_bin_stddev=preprocessor.LIBRISPEECH_STD_VECTOR)
self.subsample = Subsample(config=config)
self.lstms = nn.ModuleList(
[BatchRNN(config) for _ in range(config.num_lstm_layers)])
self.ffns = nn.ModuleList(
[FeedForwardModule(config) for _ in range(config.num_ffn_layers)])
if config.enable_decoder_layer_norm:
self.ln = LayerNorm(config.encoder_dim)
else:
self.ln = nn.Identity()
self.lin = nn.Linear(config.encoder_dim, config.vocab_size)
def forward(self, inputs, input_paddings):
outputs = inputs
output_paddings = input_paddings
outputs, output_paddings = self.preprocessor(outputs, output_paddings)
if self.training and self.config.use_specaug:
outputs, output_paddings = self.specaug(outputs, output_paddings)
outputs, output_paddings = self.subsample(outputs, output_paddings)
for idx in range(self.config.num_lstm_layers):
if self.config.enable_residual_connections:
outputs = outputs + self.lstms[idx](outputs, output_paddings)
else:
outputs = self.lstms[idx](outputs, output_paddings)
for idx in range(self.config.num_ffn_layers):
if self.config.enable_residual_connections:
outputs = outputs + self.ffns[idx](outputs, output_paddings)
else:
outputs = self.ffns[idx](outputs, output_paddings)
if self.config.enable_decoder_layer_norm:
outputs = self.ln(outputs)
outputs = self.lin(outputs)
return outputs, output_paddings
|
from typing import Optional
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
initialize
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
LibriSpeechConformerWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
DeepspeechConfig
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.models import \
DeepspeechEncoderDecoder
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
MAX_INPUT_LENGTH = 320000
class LibriSpeechDeepSpeechWorkload(LibriSpeechConformerWorkload):
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Deepspeech model init function.
Here we use dropout_rate as feed_forward_dropout_rate, and aux_dropout_rate
as input_dropout_rate.
"""
torch.random.manual_seed(rng[0])
model = DeepspeechEncoderDecoder(
DeepspeechConfig(
feed_forward_dropout_rate=dropout_rate,
use_specaug=self.use_specaug,
input_dropout_rate=aux_dropout_rate)).eval()
self.ctc_loss = torch.nn.CTCLoss(blank=0, reduction='none')
# Run model once to initialize lazy layers.
t = MAX_INPUT_LENGTH
wave = torch.randn((2, t))
pad = torch.zeros_like(wave)
_ = model(wave, pad)
initialize(model)
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
self.requires_sync_before_eval = False
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['lin.weight', 'lin.bias']
|
"""Input pipeline for a WMT dataset."""
import functools
import os
from typing import Dict, List, Optional, Union
import tensorflow as tf
import tensorflow_datasets as tfds
from algorithmic_efficiency import data_utils
from algorithmic_efficiency.pytorch_utils import pytorch_setup
from algorithmic_efficiency.workloads.wmt import tokenizer
RANK = pytorch_setup()[1]
# Avoid multithreading in all processes but the first (rank 0).
AUTOTUNE = tf.data.AUTOTUNE if RANK == 0 else None
Features = Dict[str, tf.Tensor]
TFDS_SPLIT_NAME = {
'train': 'train',
'eval_train': 'train',
'validation': 'validation',
'test': 'test',
}
def normalize_feature_names(ds_info, features: Features) -> Features:
"""Normalizes feature names to 'inputs' and 'targets'."""
input_lang, target_lang = ds_info.supervised_keys
features['inputs'] = features.pop(input_lang)
features['targets'] = features.pop(target_lang)
return features
def pack_dataset(dataset: tf.data.Dataset,
key2length: Union[int, Dict[str, int]],
keys: Optional[List[str]] = None) -> tf.data.Dataset:
"""Creates a 'packed' version of a dataset on-the-fly.
Adapted from the mesh-tf implementation.
This is meant to replace the irritation of having to create a separate
"packed" version of a dataset to train efficiently on TPU.
Each example in the output dataset represents several examples in the
input dataset.
For each key in the input dataset, two additional keys are created:
<key>_segmentation: an int32 tensor identifying the parts
representing the original example.
<key>_position: an int32 tensor identifying the position within the original
example.
Example:
Two input examples get combined to form an output example.
The input examples are:
{"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
{"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
The output example is:
{
"inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
"inputs_segmentations": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
"inputs_positions": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
"targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
"targets_segmentations": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
"targets_positions": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
}
0 represents padding in both the inputs and the outputs.
Sequences in the incoming examples are truncated to length "length", and the
sequences in the output examples all have fixed (padded) length "length".
Args:
dataset: a tf.data.Dataset
key2length: an integer, or a dict from feature-key to integer
keys: a list of strings (e.g. ["inputs", "targets"])
Returns:
a tf.data.Dataset
"""
shapes = tf.nest.map_structure(lambda spec: spec.shape, dataset.element_spec)
if keys is None:
keys = list(shapes.keys())
for k in keys:
if k not in shapes:
raise ValueError(
f'Key {k} not found in dataset. Available keys are {shapes.keys()}')
if not shapes[k].is_compatible_with(tf.TensorShape([None])):
raise ValueError('Tensors to be packed must be one-dimensional.')
# make sure that the length dictionary contains all keys as well as the
# keys suffixed by "_segmentation" and "_position"
if isinstance(key2length, int):
key2length = {k: key2length for k in keys}
for k in keys:
for suffix in ['_segmentation', '_position']:
key2length[k + suffix] = key2length[k]
# trim to length
dataset = dataset.map(
lambda x: {k: x[k][:key2length[k]] for k in keys},
num_parallel_calls=AUTOTUNE)
# Setting batch_size=length ensures that the concatenated sequences (if they
# have length >=1) are sufficient to fill at least one packed example.
batch_size = max(key2length.values())
dataset = dataset.padded_batch(
batch_size, padded_shapes={k: [-1] for k in keys})
dataset = _pack_with_tf_ops(dataset, keys, key2length)
# Set the Tensor shapes correctly since they get lost in the process.
def my_fn(x):
return {k: tf.reshape(v, [key2length[k]]) for k, v in x.items()}
return dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
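# Illustrative sketch, not part of the input pipeline above: packing the two
# toy examples from the `pack_dataset` docstring. Everything here (the
# generator, the function name, the packed length of 10) is made up for
# illustration.
def _example_pack_toy_dataset() -> tf.data.Dataset:
  def gen():
    yield {'inputs': [8, 7, 1], 'targets': [4, 1]}
    yield {'inputs': [2, 3, 4, 1], 'targets': [5, 6, 1]}
  ds = tf.data.Dataset.from_generator(
      gen,
      output_signature={
          'inputs': tf.TensorSpec(shape=[None], dtype=tf.int32),
          'targets': tf.TensorSpec(shape=[None], dtype=tf.int32),
      })
  # Both features are packed into fixed-length-10 tensors, with the
  # *_segmentation and *_position features described in the docstring.
  return pack_dataset(ds, key2length=10)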
def _pack_with_tf_ops(dataset: tf.data.Dataset,
keys: List[str],
key2length: Dict[str, int]) -> tf.data.Dataset:
"""Helper-function for packing a dataset which has already been batched.
Helper for pack_dataset() Uses tf.while_loop.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings
    key2length: a dict from feature-key to integer
Returns:
a dataset.
"""
empty_example = {}
for k in keys:
empty_example[k] = tf.zeros([0], dtype=tf.int32)
empty_example[k + '_position'] = tf.zeros([0], dtype=tf.int32)
keys_etc = empty_example.keys()
def write_packed_example(partial, outputs):
new_partial = empty_example.copy()
new_outputs = {}
for k in keys_etc:
new_outputs[k] = outputs[k].write(
outputs[k].size(),
tf.pad(partial[k], [[0, key2length[k] - tf.size(partial[k])]]))
return new_partial, new_outputs
def map_fn(x):
"""Internal function to flat_map over.
Consumes a batch of input examples and produces a variable number of output
examples.
Args:
x: a single example
Returns:
a tf.data.Dataset
"""
partial = empty_example.copy()
i = tf.zeros([], dtype=tf.int32)
dynamic_batch_size = tf.shape(x[keys[0]])[0]
outputs = {}
for k in keys:
outputs[k] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[key2length[k]])
outputs[k + '_position'] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[key2length[k]])
def body_fn(i, partial, outputs):
"""Body function for while_loop.
Args:
i: integer scalar
partial: dictionary of Tensor (partially-constructed example)
outputs: dictionary of TensorArray
Returns:
A triple containing the new values of the inputs.
"""
can_append = True
one_example = {}
for k in keys:
val = tf.cast(x[k][i], tf.int32)
val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
one_example[k] = val
for k in keys:
can_append = tf.logical_and(
can_append,
tf.less_equal(
tf.size(partial[k]) + tf.size(one_example[k]), key2length[k]))
def false_fn():
return write_packed_example(partial, outputs)
def true_fn():
return partial, outputs
partial, outputs = tf.cond(can_append, true_fn, false_fn)
new_partial = {}
for k in keys:
new_seq = one_example[k][:key2length[k]]
new_seq_len = tf.size(new_seq)
new_partial[k] = tf.concat([partial[k], new_seq], 0)
new_partial[k + '_position'] = tf.concat(
[partial[k + '_position'], tf.range(new_seq_len)], 0)
partial = new_partial
return i + 1, partial, outputs
# For loop over all examples in the batch.
i, partial, outputs = tf.while_loop(
cond=lambda *_: True,
body=body_fn,
loop_vars=(i, partial, outputs),
shape_invariants=(
tf.TensorShape([]),
{k: tf.TensorShape([None]) for k in keys_etc},
{k: tf.TensorShape(None) for k in keys_etc},
),
maximum_iterations=dynamic_batch_size)
_, outputs = write_packed_example(partial, outputs)
packed = {k: outputs[k].stack() for k in keys_etc}
for k in keys:
packed[k + '_segmentation'] = (
tf.cumsum(
tf.cast(tf.equal(packed[k + '_position'], 0), tf.int32), axis=1) *
tf.cast(tf.not_equal(packed[k], 0), tf.int32))
return packed
dataset = dataset.map(map_fn, num_parallel_calls=AUTOTUNE)
return dataset.unbatch()
def preprocess_wmt_data(dataset: tf.data.Dataset,
data_rng,
train: bool,
shuffle: bool,
shuffle_buffer_size: int = 1024,
max_length: int = 256,
global_batch_size: int = 128):
"""Shuffle and batch/pack the given dataset."""
def length_filter(max_len):
def filter_fn(x):
source, target = x['inputs'], x['targets']
l = tf.maximum(tf.shape(source)[0], tf.shape(target)[0])
return tf.less(l, max_len + 1)
return filter_fn
if max_length > 0:
dataset = dataset.filter(length_filter(max_length))
if shuffle:
dataset = dataset.shuffle(shuffle_buffer_size, seed=data_rng[0])
if train:
dataset = dataset.repeat()
dataset = pack_dataset(dataset, max_length)
dataset = dataset.batch(global_batch_size, drop_remainder=train)
else: # simple (static-shape) padded batching
dataset = dataset.padded_batch(
global_batch_size,
padded_shapes={'inputs': max_length, 'targets': max_length},
padding_values={'inputs': 0, 'targets': 0},
drop_remainder=False)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def get_wmt_dataset(data_rng,
split: str,
data_dir: str,
is_training: bool,
vocab_size: int,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False,
vocab_path: Optional[str] = None):
"""Load and return dataset of batched examples for use during training."""
if vocab_path is None:
vocab_path = os.path.join(data_dir, 'wmt_sentencepiece_model')
if split in ['validation', 'test']:
ds_name = 'wmt14_translate/de-en:1.0.0'
else:
ds_name = 'wmt17_translate/de-en:1.0.0'
dataset_builder = tfds.builder(ds_name, data_dir=data_dir)
ds = dataset_builder.as_dataset(
split=TFDS_SPLIT_NAME[split], shuffle_files=False)
# Avoid creating too many threads when using PyTorch DDP.
if RANK != 0:
options = tf.data.Options()
options.threading.private_threadpool_size = 1
ds = ds.with_options(options)
ds = ds.map(
functools.partial(normalize_feature_names, dataset_builder.info),
num_parallel_calls=AUTOTUNE)
# Load tf-text SentencePiece tokenizer.
sp_tokenizer = tokenizer.load_tokenizer(vocab_path=vocab_path)
ds = ds.map(tokenizer.TokenizeOp(sp_tokenizer), num_parallel_calls=AUTOTUNE)
shuffle = split in ['train', 'eval_train']
ds = preprocess_wmt_data(
ds,
data_rng,
train=is_training,
shuffle=shuffle,
global_batch_size=global_batch_size,
max_length=256)
if num_batches:
ds = ds.take(num_batches)
if repeat_final_dataset:
ds = ds.repeat()
ds = map(
functools.partial(
data_utils.shard_and_maybe_pad_np,
global_batch_size=global_batch_size),
ds)
return ds, sp_tokenizer
|
from itertools import zip_longest
from typing import Sequence
from absl import logging
import sacrebleu
import torch
import torch.distributed as dist
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP, _, DEVICE, N_GPUS = pytorch_setup()
# Modified (added sync for PyTorch DDP) from
# https://github.com/mjpost/sacrebleu/blob/v1.3.1/sacrebleu.py.
# Assumes that sacrebleu==1.3.1 is installed.
def corpus_bleu(sys_stream: Sequence[str],
ref_streams: Sequence[str],
smooth_method: str = 'exp',
smooth_value: float = 0.0,
force: bool = False,
lowercase: bool = False,
tokenize: str = '13a',
use_effective_order: bool = False) -> sacrebleu.BLEU:
"""Produces BLEU scores along with its sufficient statistics from a source
against one or more references.
:param sys_stream: The system stream (a sequence of segments).
:param ref_streams: A list of one or more reference streams
(each a sequence of segments).
  :param smooth_method: The smoothing method to use.
:param smooth_value: For 'floor' smoothing, the floor to use.
:param force: Ignore data that looks already tokenized.
:param lowercase: Lowercase the data.
:param tokenize: The tokenizer to use.
:return: A BLEU object containing everything you'd want.
"""
# Add some robustness to the input arguments.
if isinstance(sys_stream, str):
sys_stream = [sys_stream]
if isinstance(ref_streams, str):
ref_streams = [[ref_streams]]
sys_len = 0
ref_len = 0
correct = [0 for _ in range(sacrebleu.NGRAM_ORDER)]
total = [0 for _ in range(sacrebleu.NGRAM_ORDER)]
# Look for already-tokenized sentences.
tokenized_count = 0
fhs = [sys_stream] + ref_streams
for lines in zip_longest(*fhs):
if None in lines:
raise EOFError('Source and reference streams have different lengths!')
if lowercase:
lines = [x.lower() for x in lines]
if not (force or tokenize == 'none') and lines[0].rstrip().endswith(' .'):
tokenized_count += 1
if tokenized_count == 100:
logging.warning(
'That\'s 100 lines that end in a tokenized period (\'.\')')
logging.warning('It looks like you forgot to detokenize your test '
'data, which may hurt your score.')
logging.warning('If you insist your data is detokenized, '
'or don\'t care, you can suppress this message with '
'\'--force\'.')
output, *refs = [sacrebleu.TOKENIZERS[tokenize](x.rstrip()) for x in lines]
ref_ngrams, _, closest_len = sacrebleu.ref_stats(output, refs)
sys_len += len(output.split())
ref_len += closest_len
sys_ngrams = sacrebleu.extract_ngrams(output)
for ngram, sys_ngram in sys_ngrams.items():
n = len(ngram.split())
correct[n - 1] += min(sys_ngram, ref_ngrams.get(ngram, 0))
total[n - 1] += sys_ngram
# When using PyTorch DDP, get stats from all processes and sum them.
if USE_PYTORCH_DDP:
# Sum `sys_len` and `ref_len` integers from all processes.
sys_len = torch.tensor(sys_len, dtype=torch.int64, device=DEVICE)
dist.all_reduce(sys_len)
sys_len = sys_len.item()
ref_len = torch.tensor(ref_len, dtype=torch.int64, device=DEVICE)
dist.all_reduce(ref_len)
ref_len = ref_len.item()
# Sum `correct` and `total` sequences from all processes.
correct = torch.tensor(correct, dtype=torch.int64, device=DEVICE)
dist.all_reduce(correct)
correct = correct.cpu().numpy().tolist()
total = torch.tensor(total, dtype=torch.int64, device=DEVICE)
dist.all_reduce(total)
total = total.cpu().numpy().tolist()
return sacrebleu.compute_bleu(
correct,
total,
sys_len,
ref_len,
smooth_method=smooth_method,
smooth_value=smooth_value,
use_effective_order=use_effective_order)
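# Illustrative sketch, not part of the metric code above: calling the DDP-aware
# `corpus_bleu` on a made-up hypothesis/reference pair in a single process (no
# process group initialized) and reading the corpus BLEU, which sacrebleu
# reports on a 0-100 scale. The function name and sentences are hypothetical.
def _example_toy_bleu() -> float:
  sys_stream = ['the cat sat on the mat .']
  ref_streams = [['the cat sat on the mat .']]
  return corpus_bleu(sys_stream, ref_streams, use_effective_order=True).score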
|
"""WMT workload parent class."""
import abc
import math
import os
from typing import Any, Dict, Optional, Tuple
import jax
import numpy as np
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.wmt import input_pipeline
from algorithmic_efficiency.workloads.wmt.wmt_jax import decode
VOCAB_PATH = './wmt_256/sentencepiece_model'
WORKDIR = './wmt_256'
USE_PYTORCH_DDP = 'LOCAL_RANK' in os.environ
class BaseWmtWorkload(spec.Workload):
"""A WMT workload."""
_vocab_size: int = 32000
def __init__(self) -> None:
super().__init__()
self._tokenizer = None
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'bleu'
  def has_reached_validation_target(self,
                                    eval_result: Dict[str, float]) -> bool:
return eval_result['validation/bleu'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 30.8491
  def has_reached_test_target(self, eval_result: Dict[str, float]) -> bool:
return eval_result['test/bleu'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 30.7219
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
# wmt17_translate/de-en 'train' split size
return 5906184
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
# wmt14_translate/de-en 'validation' split size.
return 3000
@property
def num_test_examples(self) -> int:
# wmt14_translate/de-en 'test' split size.
return 3003
@property
def eval_batch_size(self) -> int:
return 128
@property
def train_mean(self):
raise NotImplementedError
@property
def train_stddev(self):
raise NotImplementedError
@property
def max_allowed_runtime_sec(self) -> int:
return 48_151 # ~13.5 hours
@property
def eval_period_time_sec(self) -> int:
return 14 * 60
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 133_333
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False):
is_training = split == 'train'
ds, self._tokenizer = input_pipeline.get_wmt_dataset(
data_rng,
split,
data_dir,
is_training=is_training,
vocab_size=self._vocab_size,
global_batch_size=global_batch_size,
num_batches=num_batches,
repeat_final_dataset=repeat_final_dataset)
# Separate function is necessary because the code above has to be executed
# when _build_input_queue is called (not when next() is first called on it).
def _input_queue_generator():
for batch in iter(ds):
weights = batch.get('weights')
updated_weights = np.where(batch['targets'] > 0, 1, 0)
if weights is not None:
updated_weights = np.logical_and(weights, updated_weights)
batch['weights'] = updated_weights
yield batch
return _input_queue_generator()
@abc.abstractmethod
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del model_state
del global_step
num_batches = int(math.ceil(num_examples / global_batch_size))
if split not in self._eval_iters:
# These iterators will repeat indefinitely.
self._eval_iters[split] = self._build_input_queue(
rng,
split,
data_dir,
global_batch_size,
num_batches,
repeat_final_dataset=True)
eval_metrics = {}
for _ in range(num_batches):
eval_batch = next(self._eval_iters[split])
metrics = self.eval_step(params, eval_batch)
for metric_name, metric_value in metrics.items():
if metric_name not in eval_metrics:
eval_metrics[metric_name] = 0.0
eval_metrics[metric_name] += metric_value
eval_results = self._normalize_eval_metrics(num_examples, eval_metrics)
eval_results['bleu'] = self.translate_and_calculate_bleu(
params=params,
ds_iter=self._eval_iters[split],
num_batches=num_batches,
max_predict_length=256)
return eval_results
def compute_weighted_accuracy(
self, logits: spec.Tensor, targets: spec.Tensor,
weights: spec.Tensor) -> Tuple[spec.Tensor, spec.Tensor]:
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: array of shape [batch, length]
Returns:
Tuple of scalar summed accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(f'Incorrect shapes. Got shape {logits.shape} logits and '
f'{targets.shape} targets.')
accuracy = (logits.argmax(-1) == targets) * weights
normalizing_factor = weights.sum()
return accuracy.sum(), normalizing_factor
def _decode_tokens(self, toks: spec.Tensor) -> spec.Tensor:
if isinstance(toks, torch.Tensor):
toks = toks.cpu().numpy()
valid_toks = toks[:np.argmax(toks == decode.EOS_ID) + 1].astype(np.int32)
return self._tokenizer.detokenize(valid_toks).numpy().decode('utf-8')
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
return self.compute_weighted_cross_entropy(
logits_batch,
label_batch,
weights=mask_batch,
label_smoothing=label_smoothing)
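# Illustrative sketch, not part of the workload above: `compute_weighted_accuracy`
# on a toy batch where the second position is padding (weight 0). The method does
# not use `self`, so it is called unbound here purely for illustration; the
# function name and the arrays are made up.
def _example_weighted_accuracy() -> float:
  logits = np.array([[[0.1, 0.9], [0.8, 0.2]]])  # [batch=1, length=2, classes=2]
  targets = np.array([[1, 1]])
  weights = np.array([[1.0, 0.0]])
  acc_sum, normalizer = BaseWmtWorkload.compute_weighted_accuracy(
      None, logits, targets, weights)
  # Only the unmasked position counts, so the weighted accuracy is 1.0.
  return float(acc_sum / normalizer)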
|
"""Provides op for tokenizing a dataset.
Modified from https://github.com/google/flax/tree/main/examples/wmt.
"""
import dataclasses
import os
import tempfile
import time
from typing import Any, Dict, Iterable, Tuple
from absl import logging
import jax
from sentencepiece import SentencePieceTrainer
import tensorflow as tf
import tensorflow_text as tftxt
Features = Dict[str, tf.Tensor]
def _dump_chars_to_textfile(
dataset: tf.data.Dataset,
maxchars: int = int(1e7),
data_keys=('inputs', 'targets')
) -> Tuple[str, int]:
"""Write part of a TFDS sentence dataset to lines in a text file.
Args:
dataset: tf.dataset containing string-data.
maxchars: int: approximate number of characters to save from dataset.
data_keys: Tuple[str]: what keys in dataset to dump from.
Returns:
name of temp file with dataset bytes, exact number of characters dumped.
"""
char_count = 0
ds_iter = dataset.as_numpy_iterator()
with tempfile.NamedTemporaryFile(
delete=False, prefix='/tmp/ds_chars') as outfp:
while char_count < maxchars:
example = next(ds_iter)
for k in data_keys:
line = example[k] + b'\n'
char_count += len(line)
outfp.write(line)
return outfp.name, char_count
def _train_sentencepiece(dataset: tf.data.Dataset,
*,
vocab_size: int,
maxchars: int = int(1e7),
model_path: str,
model_type: str = 'unigram',
character_coverage: float = 1.0,
data_keys=('inputs', 'targets')):
"""Train SentencePiece tokenizer from subset of tf dataset.
Args:
dataset: tf.dataset
vocab_size: int: size of vocab tokens to train.
maxchars: int: number of characters to use for sentencepiece training.
model_path: str: path of model file to save vocab model to.
model_type: str: type of sentencepiece vocab to train.
    character_coverage: amount of characters covered by the model; good defaults
      are 0.9995 for languages with a rich character set like Japanese or
      Chinese, and 1.0 for other languages with a small character set.
data_keys: Tuple[str]: keys of dataset to use for training.
Returns:
path to the trained sentencepiece vocabulary model.
"""
if model_path.startswith('gs://'):
abs_model_path = model_path
else:
abs_model_path = os.path.abspath(os.path.expanduser(model_path))
fname, _ = _dump_chars_to_textfile(
dataset, maxchars=maxchars, data_keys=data_keys)
with tempfile.NamedTemporaryFile(
delete=False, prefix='/tmp/sp_tmp') as model_fp:
    pass  # We only need a prefixed temp filename here.
argstr = ' '.join([
f'--input={fname}',
f'--vocab_size={vocab_size}',
f'--character_coverage={character_coverage}',
f'--model_prefix={model_fp.name}',
f'--model_type={model_type}',
])
SentencePieceTrainer.Train(argstr)
if jax.process_index() == 0:
# Use an intermediate filename that is renamed to the target name to address
# create and fill delays.
copy_rename_path = abs_model_path + '.rntmp'
tf.io.gfile.copy(model_fp.name + '.model', copy_rename_path, overwrite=True)
tf.io.gfile.rename(copy_rename_path, abs_model_path, overwrite=True)
logging.info('copied %s to %s', model_fp.name + '.model', abs_model_path)
else:
while not tf.io.gfile.exists(abs_model_path):
time.sleep(1)
time.sleep(1)
def _load_sentencepiece_tokenizer(model_path: str,
add_bos: bool = False,
add_eos: bool = True,
reverse: bool = False):
"""Load a tf-text SentencePiece tokenizer from given model filepath."""
with tf.io.gfile.GFile(model_path, 'rb') as model_fp:
sp_model = model_fp.read()
sp_tokenizer = tftxt.SentencepieceTokenizer(
model=sp_model, add_bos=add_bos, add_eos=add_eos, reverse=reverse)
return sp_tokenizer
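# Illustrative usage sketch (hypothetical path, not part of the original
# pipeline): the returned tf-text tokenizer maps strings to int32 id tensors
# and back.
#   sp_tokenizer = _load_sentencepiece_tokenizer('/tmp/wmt_vocab.model')
#   ids = sp_tokenizer.tokenize(tf.constant('hello world'))  # 1-D int32 ids
#   text = sp_tokenizer.detokenize(ids)  # approximately round-trips the input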
def train_tokenizer(dataset: tf.data.Dataset,
*,
vocab_path: str,
vocab_size: int,
max_corpus_chars: int,
data_keys: Tuple[str, str] = ('inputs', 'targets')):
"""Trains a tokenizer from `dataset`."""
logging.info('Building SentencePiece vocab from data.')
_train_sentencepiece(
dataset,
vocab_size=vocab_size,
maxchars=max_corpus_chars,
model_path=vocab_path,
data_keys=data_keys)
def load_tokenizer(vocab_path: str):
"""Loads the tokenizer at `vocab_path`."""
return _load_sentencepiece_tokenizer(os.path.expanduser(vocab_path))
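# Minimal end-to-end sketch under assumed paths and sizes (illustrative only):
#   train_tokenizer(train_ds, vocab_path='/tmp/wmt_sentencepiece_model',
#                   vocab_size=32000, max_corpus_chars=int(1e7))
#   sp_tokenizer = load_tokenizer('/tmp/wmt_sentencepiece_model')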
@dataclasses.dataclass
class TokenizeOp:
sp_tokenizer: Any
data_keys: Iterable[str] = ('inputs', 'targets')
def __call__(self, features: Features) -> Features:
for k in self.data_keys:
features[k] = self.sp_tokenizer.tokenize(features[k])
return features
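# Illustrative only: TokenizeOp is intended to be mapped over a tf.data.Dataset
# of string features (the dataset and tokenizer names below are assumptions):
#   tokenized_ds = raw_ds.map(TokenizeOp(sp_tokenizer),
#                             num_parallel_calls=tf.data.AUTOTUNE)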
|
import copy
import math
from typing import Any, Callable, Dict, Optional, Tuple, Union
import warnings
import torch
from torch import nn
from torch import Tensor
import torch.nn.functional as F
from torch.nn.init import normal_
from torch.nn.init import xavier_uniform_
# Mask making utilities ported to PyTorch from
# https://github.com/google/flax/blob/main/flax/linen/attention.py.
def make_attention_mask(query_input: Tensor,
key_input: Tensor,
pairwise_fn: Callable[..., Any] = torch.mul,
dtype: torch.dtype = torch.float32) -> Tensor:
"""Mask-making helper for attention weights.
Args:
query_input: a batched, flat input of query_length size
key_input: a batched, flat input of key_length size
pairwise_fn: broadcasting elementwise comparison function
dtype: mask return dtype
Returns:
A `[batch..., len_q, len_kv]` shaped attention mask.
"""
mask = pairwise_fn(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
return mask.to(dtype)
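# Shape sketch (illustrative, not part of the original module): with inputs of
# shape [batch, len], the broadcasted pairwise op yields a
# [batch, len_q, len_kv] mask, e.g.
#   q = torch.tensor([[1., 1., 0.]])   # [1, 3], last position is padding
#   make_attention_mask(q > 0, q > 0)  # [1, 3, 3]; zeros in row/column 2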
def make_causal_mask(x: Tensor,
device: str = 'cuda:0',
dtype: torch.dtype = torch.float32) -> Tensor:
"""Make a causal mask for self-attention.
Args:
x: input array of shape `[batch..., len]`
device: device to store the idxs
dtype: mask return dtype
Returns:
A `[batch..., len, len]` shaped causal attention mask.
"""
idxs = torch.broadcast_to(
torch.arange(x.shape[-1], dtype=torch.int32, device=device), x.shape)
return make_attention_mask(idxs, idxs, torch.greater_equal, dtype=dtype)
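# Illustrative: for x of shape [1, 4] the result is a [1, 4, 4] lower-triangular
# mask (position i may attend to positions j <= i), e.g.
#   make_causal_mask(torch.zeros(1, 4), device='cpu')[0]
#   # tensor([[1., 0., 0., 0.],
#   #         [1., 1., 0., 0.],
#   #         [1., 1., 1., 0.],
#   #         [1., 1., 1., 1.]])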
def make_src_mask(src, inputs_segmentation, nhead):
"""Utility for creating src mask and adjust it for PyTorch Transformer API."""
src_mask = make_attention_mask(src > 0, src > 0)
# Add segmentation block-diagonal attention mask if using segmented data.
if inputs_segmentation is not None:
src_mask = torch.logical_and(
src_mask,
make_attention_mask(inputs_segmentation, inputs_segmentation, torch.eq))
# Flip values and ensure numerical stability.
src_mask = torch.repeat_interleave(
torch.logical_not(src_mask), repeats=nhead, dim=0)
new_src_mask = torch.zeros_like(src_mask, dtype=torch.float32)
new_src_mask.masked_fill_(src_mask, -1e10)
return new_src_mask
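# Illustrative shape note (not part of the original module): for src of shape
# [batch, len] the returned additive mask has shape [batch * nhead, len, len]
# and dtype float32, with 0. at allowed positions and -1e10 at padded (or
# cross-segment) positions, as expected by the PyTorch Transformer API.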
def make_tgt_and_memory_mask(tgt,
src,
inputs_segmentation,
targets_segmentation,
decode,
nhead):
""" Utility for creating target and memory mask and adjust them for PyTorch
Transformer API."""
if not decode:
tgt_mask = torch.logical_and(
make_attention_mask(tgt > 0, tgt > 0),
make_causal_mask(tgt, device=tgt.device))
memory_mask = make_attention_mask(tgt > 0, src > 0)
else:
tgt_mask = None
memory_mask = make_attention_mask(torch.ones_like(tgt) > 0, src > 0)
# Add segmentation block-diagonal attention masks if using segmented data.
if inputs_segmentation is not None:
tgt_mask = torch.logical_and(
tgt_mask,
make_attention_mask(targets_segmentation,
targets_segmentation,
torch.eq))
memory_mask = torch.logical_and(
memory_mask,
make_attention_mask(targets_segmentation, inputs_segmentation,
torch.eq))
# Flip values and ensure numerical stability.
memory_mask = torch.repeat_interleave(
torch.logical_not(memory_mask), repeats=nhead, dim=0)
new_memory_mask = torch.zeros_like(memory_mask, dtype=torch.float32)
new_memory_mask.masked_fill_(memory_mask, -1e10)
if tgt_mask is not None:
tgt_mask = torch.repeat_interleave(
torch.logical_not(tgt_mask), repeats=nhead, dim=0)
new_tgt_mask = torch.zeros_like(tgt_mask, dtype=torch.float32)
new_tgt_mask.masked_fill_(tgt_mask, -1e10)
tgt_mask = new_tgt_mask
return tgt_mask, new_memory_mask
def shift_right(x, axis=1):
"""Shift the input to the right by padding on axis 1."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (1, 0)
pad_widths = tuple(t for tup in reversed(pad_widths) for t in tup)
padded = F.pad(x, pad_widths, mode='constant')
return padded[:, :-1]
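# Illustrative: shift_right prepends a zero along the sequence axis and drops
# the last position, e.g.
#   shift_right(torch.tensor([[3, 5, 7]]))  # -> tensor([[0, 3, 5]])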
class Transformer(nn.Module):
"""Transformer architecture based on the model from the WMT Jax workload."""
def __init__(self,
ntoken: int = 32000,
d_model: int = 1024,
nhead: int = 16,
d_hid: int = 1024,
nlayers: int = 6,
dropout_rate: Optional[float] = 0.1,
attention_dropout_rate: Optional[float] = 0.1,
layer_norm_eps: float = 1e-6):
super().__init__()
if dropout_rate is None:
dropout_rate = 0.1
if attention_dropout_rate is None:
attention_dropout_rate = 0.1
self.pos_encoder = PositionalEncoding(d_model, dropout_rate)
self.shared_embedding = nn.Embedding(ntoken, d_model)
self.encoder = Encoder(d_model,
nhead,
d_hid,
nlayers,
dropout_rate,
attention_dropout_rate,
layer_norm_eps)
self.decoder = Decoder(d_model,
nhead,
d_hid,
nlayers,
dropout_rate,
attention_dropout_rate,
layer_norm_eps)
# Share positional encoding and embedding between encoder and decoder.
self.encoder.pos_encoder = self.pos_encoder
self.encoder.shared_embedding = self.shared_embedding
self.decoder.pos_encoder = self.pos_encoder
self.decoder.shared_embedding = self.shared_embedding
self._reset_parameters()
def _reset_parameters(self):
"""Initiate parameters in the transformer model."""
for module in self.modules():
if isinstance(module, nn.Linear):
xavier_uniform_(module.weight)
if module.bias is not None:
normal_(module.bias, std=1e-6)
def forward(self,
src: Tensor,
tgt: Tensor,
inputs_positions: Optional[Tensor] = None,
targets_positions: Optional[Tensor] = None,
inputs_segmentation: Optional[Tensor] = None,
targets_segmentation: Optional[Tensor] = None,
decode: bool = False) -> Tensor:
"""
Args:
src: Tensor, shape [batch_size, seq_len]
tgt: Tensor, shape [batch_size, seq_len]
inputs_positions: Optional[Tensor], shape [batch_size, seq_len]
targets_positions: Optional[Tensor], shape [batch_size, seq_len]
inputs_segmentation: Optional[Tensor], shape [batch_size, seq_len]
targets_segmentation: Optional[Tensor], shape [batch_size, seq_len]
decode: bool
Returns:
output Tensor of shape [batch_size, seq_len, ntoken]
"""
if src.size(0) != tgt.size(0):
raise RuntimeError('The batch size of src and tgt must be equal.')
memory = self.encoder(
src,
inputs_positions=inputs_positions,
inputs_segmentation=inputs_segmentation)
output = self.decoder(
tgt,
memory,
src, # just for calculating the padding mask
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentation,
targets_segmentation=targets_segmentation,
decode=decode)
return output
class TransformerEncoder(nn.Module):
r"""TransformerEncoder is a stack of N encoder layers. Users can build the
BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class.
num_layers: the number of sub-encoder-layers in the encoder.
norm: the layer normalization component (optional).
enable_nested_tensor: if True, input will automatically convert to
nested tensor (and convert back on output). This will improve
the overall performance of TransformerEncoder when padding
rate is high.
Examples::
    >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, 6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ['norm']
def __init__(self,
encoder_layer,
num_layers,
norm=None,
enable_nested_tensor=True,
mask_check=True):
super().__init__()
self.layers = nn.ModuleList(
[copy.deepcopy(encoder_layer) for _ in range(num_layers)])
self.num_layers = num_layers
self.norm = norm
self.enable_nested_tensor = enable_nested_tensor
self.mask_check = mask_check
def forward(self,
src: Tensor,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
if src_key_padding_mask is not None:
_skpm_dtype = src_key_padding_mask.dtype # pylint: disable=invalid-name
if _skpm_dtype != torch.bool and not torch.is_floating_point(
src_key_padding_mask):
raise AssertionError(
'only bool and floating types of key_padding_mask are supported')
output = src
convert_to_nested = False
src_key_padding_mask_for_layers = src_key_padding_mask
for mod in self.layers:
output = mod(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask_for_layers)
if convert_to_nested:
output = output.to_padded_tensor(0.)
if self.norm is not None:
output = self.norm(output)
return output
class Encoder(nn.Module):
def __init__(self,
d_model: int = 1024,
nhead: int = 16,
d_hid: int = 1024,
nlayers: int = 6,
dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
layer_norm_eps: float = 1e-6):
super().__init__()
self.nhead = nhead
self.shared_embedding = None
self.pos_encoder = None
encoder_layer = TransformerEncoderLayer(
d_model,
nhead,
d_hid,
dropout_rate,
attention_dropout_rate=attention_dropout_rate,
layer_norm_eps=layer_norm_eps)
encoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.encoder = TransformerEncoder(encoder_layer, nlayers, encoder_norm)
def forward(self,
src: Tensor,
inputs_positions: Optional[Tensor] = None,
inputs_segmentation: Optional[Tensor] = None) -> Tensor:
src = src.to(torch.int)
src_mask = make_src_mask(src, inputs_segmentation, self.nhead)
src = self.shared_embedding(src)
src = self.pos_encoder(src, inputs_positions)
memory = self.encoder(src, mask=src_mask)
return memory
class Decoder(nn.Module):
def __init__(self,
d_model: int = 1024,
nhead: int = 16,
d_hid: int = 1024,
nlayers: int = 6,
dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
layer_norm_eps: float = 1e-6):
super().__init__()
self.nhead = nhead
self.shared_embedding = None
self.pos_encoder = None
self.decoder = TransformerDecoder(d_model,
nhead,
d_hid,
dropout_rate,
attention_dropout_rate,
layer_norm_eps,
nlayers)
def forward(
self,
tgt: Tensor,
memory: Tensor,
src: Tensor, # just for calculating the padding mask
targets_positions: Optional[Tensor] = None,
inputs_segmentation: Optional[Tensor] = None,
targets_segmentation: Optional[Tensor] = None,
decode: bool = False,
max_len: Optional[int] = None,
cache: Optional[dict] = None) -> Any:
tgt = tgt.to(torch.int)
tgt_mask, memory_mask = make_tgt_and_memory_mask(
tgt, src, inputs_segmentation, targets_segmentation,
decode, self.nhead)
if not decode:
tgt = shift_right(tgt)
tgt = self.shared_embedding(tgt)
tgt = self.pos_encoder(tgt, targets_positions, decode=decode, cache=cache)
if decode:
tgt, cache = tgt
output = self.decoder(
tgt,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
decode=decode,
max_len=max_len,
cache=cache)
if decode:
output, cache = output
normalize = math.sqrt(output.shape[-1])
output = torch.matmul(output, self.shared_embedding.weight.T) / normalize
if decode:
return output, cache
return output
class PositionalEncoding(nn.Module):
def __init__(self,
d_model: int,
dropout_rate: float = 0.1,
max_len: int = 256):
super().__init__()
self.dropout = nn.Dropout(p=dropout_rate)
position = torch.arange(max_len).unsqueeze(1)
scale_factor = -math.log(10000.0) / (d_model // 2 - 1)
div_term = torch.exp(torch.arange(d_model // 2) * scale_factor)
pe = torch.zeros(1, max_len, d_model)
pe[0, :, :d_model // 2] = torch.sin(position * div_term)
pe[0, :, d_model // 2:2 * (d_model // 2)] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
def forward(
self,
x: Tensor,
inputs_positions: Optional[Tensor] = None,
decode: bool = False,
cache: Optional[Dict[str, Dict[str, Tensor]]] = None
) -> Union[Tensor, Tuple[Tensor, Dict[str, Dict[str, Tensor]]]]:
"""
Args:
x: Tensor (shape [batch_size, seq_len, embedding_dim])
inputs_positions: Tensor (shape [batch_size, seq_len]) or None
decode: bool
cache: Dict[str, Dict[str, Tensor]] or None
Returns:
Tensor or Tuple[Tensor, Dict[str, Dict[str, Tensor]]]
"""
# We use a cache position index for tracking decoding position.
if decode:
name = self._get_name()
if cache is None:
cache = {
name: {
'cache_index':
torch.tensor(0, dtype=torch.long, device=self.pe.device),
},
}
pe = self.pe[0, cache[name]['cache_index'], :]
cache[name]['cache_index'] += 1
return self.dropout(x + pe), cache
if inputs_positions is None:
# normal unpacked case:
pe = self.pe[:, :x.size(1), :]
else:
# for packed data we need to use known position indices:
pe = self.pe[0, inputs_positions, :]
return self.dropout(x + pe)
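# Illustrative behavior sketch (not part of the original module): in decode
# mode the module returns a (tensor, cache) tuple and advances 'cache_index'
# by one per call, so single-position inputs pick up pe[0, 0, :], pe[0, 1, :],
# ... in order. For example (assumed sizes):
#   pos_enc = PositionalEncoding(d_model=16, dropout_rate=0.0)
#   y = pos_enc(torch.zeros(2, 5, 16))                           # [2, 5, 16]
#   y_step, cache = pos_enc(torch.zeros(2, 1, 16), decode=True)  # [2, 1, 16]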
# TransformerEncoderLayer and TransformerDecoderLayer are taken from:
# https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/transformer.py
# The only difference is the use of custom MultiheadAttention modules without
# bias and with '_qkv_same_embed_dim' always set to 'False'.
class TransformerEncoderLayer(nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all
you need. In Advances in Neural Information Processing Systems,
pages 6000-6010. Users may modify or implement in a different way during
application.
Args:
d_model: the number of expected features in the input (default=1024).
nhead: the number of heads in the multiheadattention models (default=16).
dim_feedforward: the dimension of the feedforward network model
(default=1024).
dropout_rate: the dropout_rate value (default=0.1).
activation: the activation function of the intermediate layer, can be a
string ("relu" or "gelu") or a unary callable (default=F.relu).
layer_norm_eps: the eps value in layer normalization components
(default=1e-6).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``True`` (batch, seq, feature).
norm_first: if ``True``, layer norm is done prior to attention and
      feedforward operations, respectively. Otherwise it's done after.
Default: ``True``.
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
Alternatively, when ``batch_first`` is ``True``:
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8,
batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
"""
__constants__ = ['batch_first', 'norm_first']
def __init__(self,
d_model: int = 1024,
nhead: int = 16,
dim_feedforward: int = 1024,
dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-6,
batch_first: bool = True,
norm_first: bool = True,
device=None,
dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.self_attn = MultiheadAttention(
d_model,
nhead,
dropout_rate=attention_dropout_rate,
batch_first=batch_first,
bias=False,
**factory_kwargs)
# Implementation of Feedforward model.
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout_rate)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout(dropout_rate)
self.dropout2 = nn.Dropout(dropout_rate)
# We can't test self.activation in forward() in TorchScript,
# so stash some information about it instead.
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
self.activation_relu_or_gelu = 1
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
self.activation_relu_or_gelu = 2
else:
self.activation_relu_or_gelu = 0
self.activation = activation
def forward(self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
if src_key_padding_mask is not None:
_skpm_dtype = src_key_padding_mask.dtype # pylint: disable=invalid-name
if _skpm_dtype != torch.bool and not torch.is_floating_point(
src_key_padding_mask):
raise AssertionError(
'Only bool and floating types of key_padding_mask are supported')
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# Self-attention block:
def _sa_block(self,
x: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout1(x)
# Feed forward block:
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
# Modified to use cache for autoregressive decoding.
class TransformerDecoder(nn.Module):
r"""TransformerDecoder is a stack of N decoder layers
Args:
d_model: the number of expected features in the input (default=1024)
nhead: the number of heads in the multiheadattention models (default=16)
d_hid: the dimension of the feedforward network model
(default=1024)
dropout_rate: the dropout_rate value (default=0.1)
layer_norm_eps: the eps value in layer normalization components
(default=1e-6).
    attention_dropout_rate: the dropout_rate value for the attention weights
      (default=0.1)
num_layers: the number of sub-decoder-layers in the decoder
Examples::
    >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> transformer_decoder = nn.TransformerDecoder(decoder_layer, 6)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = transformer_decoder(tgt, memory)
"""
__constants__ = ['norm']
def __init__(self,
d_model,
nhead,
d_hid,
dropout_rate,
attention_dropout_rate,
layer_norm_eps,
num_layers):
super().__init__()
self.layers = nn.ModuleList([
TransformerDecoderLayer(
d_model,
nhead,
d_hid,
dropout_rate,
attention_dropout_rate,
layer_norm_eps=layer_norm_eps) for _ in range(num_layers)
])
self.num_layers = num_layers
self.norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
def forward(self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
decode: bool = False,
max_len: Optional[int] = None,
cache: Optional[dict] = None) -> Any:
r"""Pass the inputs (and mask) through the decoder layer in turn.
Args:
tgt: the sequence to the decoder (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
      decode: whether to use the cache for autoregressive decoding or not.
max_len: maximum sequence length, necessary for decoding cache.
Shape:
see the docs in Transformer class.
"""
output = tgt
for idx, mod in enumerate(self.layers):
output, cache = mod(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
decode=decode,
max_len=max_len,
cache=cache,
index=idx)
if self.norm is not None:
output = self.norm(output)
if decode:
return output, cache
return output
# Modified to use cache for autoregressive decoding.
class TransformerDecoderLayer(nn.Module):
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and
feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all
you need. In Advances in Neural Information Processing Systems,
pages 6000-6010. Users may modify or implement in a different way during
application.
Args:
d_model: the number of expected features in the input (default=1024).
nhead: the number of heads in the multiheadattention models (default=16).
dim_feedforward: the dimension of the feedforward network model
(default=1024).
dropout_rate: the dropout_rate value (default=0.1).
activation: the activation function of the intermediate layer, can be a
string ("relu" or "gelu") or a unary callable (default=F.relu).
layer_norm_eps: the eps value in layer normalization components
(default=1e-6).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``True`` (batch, seq, feature).
norm_first: if ``True``, layer norm is done prior to self attention,
      multihead attention and feedforward operations, respectively.
Otherwise it's done after. Default: ``True``.
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
Alternatively, when ``batch_first`` is ``True``:
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8,
batch_first=True)
>>> memory = torch.rand(32, 10, 512)
>>> tgt = torch.rand(32, 20, 512)
>>> out = decoder_layer(tgt, memory)
"""
__constants__ = ['batch_first', 'norm_first']
def __init__(self,
d_model: int = 1024,
nhead: int = 16,
dim_feedforward: int = 1024,
dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-6,
batch_first: bool = True,
norm_first: bool = True,
device=None,
dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.self_attn = MultiheadAttention(
d_model,
nhead,
dropout_rate=attention_dropout_rate,
batch_first=batch_first,
bias=False,
**factory_kwargs)
self.multihead_attn = MultiheadAttention(
d_model,
nhead,
dropout_rate=attention_dropout_rate,
batch_first=batch_first,
bias=False,
**factory_kwargs)
# Implementation of Feedforward model.
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout_rate)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = nn.Dropout(dropout_rate)
self.dropout2 = nn.Dropout(dropout_rate)
self.dropout3 = nn.Dropout(dropout_rate)
self.activation = activation
def forward( # pylint: disable=arguments-renamed
self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
decode: bool = False,
max_len: Optional[int] = None,
cache: Optional[dict] = None,
index: Optional[int] = None) -> Any:
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
      decode: whether to use the cache for autoregressive decoding or not.
max_len: maximum sequence length, necessary for decoding cache.
Shape:
see the docs in Transformer class.
"""
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
x = tgt
if self.norm_first:
sa_out, cache = self._sa_block(
self.norm1(x),
tgt_mask,
decode=decode,
max_len=max_len,
cache=cache,
index=index)
x = x + sa_out
x = x + self._mha_block(self.norm2(x), memory, memory_mask, None)
x = x + self._ff_block(self.norm3(x))
else:
sa_out, cache = self._sa_block(
x,
tgt_mask,
decode=decode,
max_len=max_len,
cache=cache,
index=index)
x = self.norm1(x + sa_out)
x = self.norm2(x + self._mha_block(x, memory, memory_mask, None))
x = self.norm3(x + self._ff_block(x))
return x, cache
# Self-attention block:
def _sa_block( # pylint: disable=arguments-renamed
self,
x: Tensor,
attn_mask: Optional[Tensor],
decode: bool = False,
max_len: Optional[int] = None,
cache: Optional[dict] = None,
index: Optional[int] = None) -> Any:
x, _, cache = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
need_weights=False,
decode=decode,
max_len=max_len,
cache=cache,
index=index)
return self.dropout1(x), cache
# Multihead attention block:
def _mha_block(self,
x: Tensor,
mem: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.multihead_attn(
x,
mem,
mem,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout2(x)
# Feed forward block.
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout3(x)
# The only differences from the standard PyTorch class are that
# 'self._qkv_same_embed_dim' is always set to 'False' and that a cache dict is
# threaded through the forward pass for autoregressive decoding.
class MultiheadAttention(nn.MultiheadAttention):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will
be split across ``num_heads`` (i.e. each head will have dimension
``embed_dim // num_heads``).
dropout_rate: Dropout probability on ``attn_output_weights``.
Default: ``0.0`` (no dropout_rate).
bias: If specified, adds bias to input / output projection layers.
Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at
dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value
sequences at dim=1. Default: ``False``.
kdim: Total number of features for keys. Default: ``None``
(uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None``
(uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
def __init__(self,
embed_dim,
num_heads,
dropout_rate=0.,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=True,
device=None,
dtype=None) -> None:
super().__init__(
embed_dim,
num_heads,
dropout=dropout_rate,
bias=bias,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
kdim=kdim,
vdim=vdim,
batch_first=batch_first,
device=device,
dtype=dtype)
# This is set to 'True' for kdim == vdim == embed_dim in the standard
# PyTorch class.
self._qkv_same_embed_dim = False
factory_kwargs = {'device': device, 'dtype': dtype}
self.q_proj_weight = nn.Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = nn.Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = nn.Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
normal_(self.in_proj_bias, std=1e-6)
normal_(self.out_proj.bias, std=1e-6)
if self.bias_k is not None:
normal_(self.bias_k, std=1e-6)
if self.bias_v is not None:
normal_(self.bias_v, std=1e-6)
def forward(self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True,
decode: bool = False,
max_len: Optional[int] = None,
cache: Optional[dict] = None,
index: Optional[int] = None) -> Any:
r"""
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input,
:math:`(L, N, E_q)` when ``batch_first=False`` or :math:`(N, L, E_q)`
when ``batch_first=True``, where :math:`L` is the target sequence
length, :math:`N` is the batch size, and :math:`E_q` is the query
embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input,
:math:`(S, N, E_k)` when ``batch_first=False`` or :math:`(N, S, E_k)`
when ``batch_first=True``, where :math:`S` is the source sequence
length, :math:`N` is the batch size, and :math:`E_k` is the key
embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input,
:math:`(S, N, E_v)` when ``batch_first=False`` or :math:`(N, S, E_v)`
when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the
value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: Dummy argument to make MultiheadAttention compatible
with standard PyTorch TransformerEncoder implementation.
need_weights: If specified, returns ``attn_output_weights`` in addition
        to ``attn_outputs``. Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain
positions. Must be of shape :math:`(L, S)` or
:math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the
batch size, :math:`L` is the target sequence length, and :math:`S`
is the source sequence length. A 2D mask will be broadcasted across
the batch while a 3D mask allows for a different mask for each entry
in the batch. Binary, byte, and float masks are supported.
For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask,
a non-zero value indicates that the corresponding position is not
allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned
``attn_weights`` should be averaged across heads. Otherwise,
``attn_weights`` are provided separately per head. Note that this
flag only has an effect when ``need_weights=True``. Default:
``True`` (i.e. average weights across heads)
      decode: whether to use the cache for autoregressive decoding or not.
max_len: maximum sequence length, necessary for decoding cache.
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input
is unbatched, :math:`(L, N, E)` when ``batch_first=False`` or
:math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch
size, and :math:`E` is the embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``.
If ``average_attn_weights=True``, returns attention weights averaged
across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the
target sequence length, and :math:`S` is the source sequence length.
        If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched
or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
del key_padding_mask
is_batched = query.dim() == 3
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
name = f'decoder.layers.{index}.self_attn'
loc_cache = cache[name] if decode and name in cache else None
attn_output, attn_output_weights, loc_cache = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_bias, self.bias_k, self.bias_v,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training, need_weights=need_weights, attn_mask=attn_mask,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
average_attn_weights=average_attn_weights,
decode=decode, cache=loc_cache, max_len=max_len)
if decode:
cache[name] = loc_cache
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights, cache
else:
return attn_output, attn_output_weights, cache
def _in_projection(
q: Tensor,
k: Tensor,
v: Tensor,
w_q: Tensor,
w_k: Tensor,
w_v: Tensor,
b_q: Optional[Tensor] = None,
b_k: Optional[Tensor] = None,
b_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Performs the in-projection step of the attention operation. This is simply
a triple of linear projections, with shape constraints on the weights which
ensure embedding dimension uniformity in the projected outputs.
Output is a triple containing projection tensors for query, key and value.
"""
eq, ek = q.size(-1), k.size(-1)
assert w_q.shape == (eq, eq), \
f'Expecting query weights shape of {(eq, eq)}, but got {w_q.shape}'
assert w_k.shape == (eq, ek), \
f'Expecting key weights shape of {(eq, ek)}, but got {w_k.shape}'
assert w_v.shape == (eq, ek), \
f'Expecting value weights shape of {(eq, ek)}, but got {w_v.shape}'
assert b_q is None or b_q.shape == (eq,), \
f'Expecting query bias shape of {(eq,)}, but got {b_q.shape}'
assert b_k is None or b_k.shape == (eq,), \
f'Expecting key bias shape of {(eq,)}, but got {b_k.shape}'
assert b_v is None or b_v.shape == (eq,), \
f'Expecting value bias shape of {(eq,)}, but got {b_v.shape}'
return torch.nn.functional.linear(q, w_q, b_q), \
torch.nn.functional.linear(k, w_k, b_k), \
torch.nn.functional.linear(v, w_v, b_v)
# Modified to create cache for autoregressive decoding.
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_bias: Optional[Tensor],
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
dropout_rate: float,
out_proj_weight: Tensor,
out_proj_bias: Optional[Tensor],
training: bool = True,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
average_attn_weights: bool = True,
decode: bool = False,
cache: Optional[dict] = None,
max_len: Optional[int] = None) -> Any:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_bias: input projection bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
dropout_rate: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout_rate if is ``True``.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions.
A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the
entries of each batch.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias:
input projection weight and bias.
average_attn_weights: If true, indicates that the returned ``attn_weights``
should be averaged across heads.
Otherwise, ``attn_weights`` are provided separately per head.
      Note that this flag only has an effect when ``need_weights=True``.
Default: True
    decode: whether to use the cache for autoregressive decoding or not.
cache: dict which contains cache for decoding for the current
      MultiheadAttention module.
max_len: maximum sequence length, necessary for decoding cache.
Shape:
Inputs:
- query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence
length, N is the batch size, E is the embedding dimension.
- key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence
length, N is the batch size, E is the embedding dimension.
- value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence
length, N is the batch size, E is the embedding dimension.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length,
S is the source sequence length. 3D mask :math:`(N*num_heads, L, S)`
where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is
allowed to attend the unmasked positions. If a ByteTensor is provided,
the non-zero positions are not allowed to attend while the zero positions
will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged.
If a FloatTensor is provided, it will be added to the attention weight.
Outputs:
- attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target
sequence length, N is the batch size, E is the embedding dimension.
- attn_output_weights: Only returned when ``need_weights=True``.
If ``average_attn_weights=True``, returns
attention weights averaged across heads of shape :math:`(L, S)` when input
is unbatched or :math:`(N, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source
      sequence length. If ``average_attn_weights=False``, returns attention weights
per head of shape :math:`(num_heads, L, S)` when input is unbatched or
:math:`(N, num_heads, L, S)`.
"""
# Set up shape variables.
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
assert embed_dim == embed_dim_to_check, \
f'was expecting dimension of {embed_dim_to_check}, but got {embed_dim}'
if isinstance(embed_dim, torch.Tensor):
# `embed_dim` can be a tensor when JIT tracing.
head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, \
f'embed_dim {embed_dim} not divisible by num_heads {num_heads}'
# Allow MHA to have different embedding dimensions when separate projection
# weights are used.
assert key.shape[:2] == value.shape[:2], \
(f"key's sequence and batch dims {key.shape[:2]} do not match value's "
f'{value.shape[:2]}')
# Compute in-projection.
assert q_proj_weight is not None, \
'use_separate_proj_weight is True but q_proj_weight is None'
assert k_proj_weight is not None, \
'use_separate_proj_weight is True but k_proj_weight is None'
assert v_proj_weight is not None, \
'use_separate_proj_weight is True but v_proj_weight is None'
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = in_proj_bias.chunk(3)
q, k, v = _in_projection(
query, key, value, q_proj_weight, k_proj_weight,
v_proj_weight, b_q, b_k, b_v)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if decode:
if cache is None:
cache = {
'cached_key':
torch.zeros((bsz, max_len, embed_dim),
dtype=k.dtype,
device=k.device),
'cached_value':
torch.zeros((bsz, max_len, embed_dim),
dtype=v.dtype,
device=v.device),
'cache_index':
torch.tensor(0, dtype=torch.long, device=k.device),
}
cached_key = cache['cached_key']
cached_value = cache['cached_value']
cache_index = cache['cache_index']
batch_size, max_length, num_features = cached_key.shape
assert batch_size == bsz, f'{batch_size} != {bsz}'
assert max_length == max_len, f'{max_length} != {max_len}'
assert num_features == embed_dim, f'{num_features} != {embed_dim}'
# Shape check of cached keys against query input.
expected_shape = (1, batch_size, num_features)
if expected_shape != query.shape:
raise ValueError('Autoregressive cache shape error, expected query shape '
f'{expected_shape} instead got {query.shape}.')
# Update key, value caches with our new 1d spatial slices.
cached_key[:, cache_index:cache_index + 1, :] = k.transpose(dim0=0, dim1=1)
cached_value[:, cache_index:cache_index + 1, :] = v.transpose(
dim0=0, dim1=1)
k = cached_key.transpose(dim0=0, dim1=1)
v = cached_value.transpose(dim0=0, dim1=1)
cache_index += 1
# Causal mask for cached decoder self-attention:
# our single query position should only attend to those key
# positions that have already been generated and cached,
# not the remaining zero elements.
if attn_mask is not None:
raise ValueError('Attention mask has to be None for decode == True.')
attn_mask = (torch.arange(max_length, device=k.device) >=
cache_index).reshape(1, max_length)
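    # At this point `cache` holds per-layer buffers of shape
    # [bsz, max_len, embed_dim] for keys and values plus a scalar
    # `cache_index`; only positions below `cache_index` contain real data, and
    # the boolean `attn_mask` built above hides the remaining zero slots.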
# Prepare attention mask.
if not decode and attn_mask is not None:
if attn_mask.dtype == torch.uint8:
warnings.warn(
'Byte tensor for attn_mask in nn.MultiheadAttention is deprecated.'
'Use bool tensor instead.')
attn_mask = attn_mask.to(torch.bool)
else:
assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
f'float, byte, and bool types are supported, not {attn_mask.dtype}'
# ensure attn_mask's dim is 3
if attn_mask.dim() == 2:
correct_2d_size = (tgt_len, src_len)
if attn_mask.shape != correct_2d_size:
raise RuntimeError(
f'The shape of the 2D attn_mask is {attn_mask.shape}, '
f'but should be {correct_2d_size}.')
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
if attn_mask.shape != correct_3d_size:
raise RuntimeError(f'The shape of attn_mask is {attn_mask.shape}, '
f'should be {correct_3d_size}.')
else:
raise RuntimeError(
f"attn_mask's dimension {attn_mask.dim()} is not supported")
# Add bias along batch dimension (currently second).
if bias_k is not None and bias_v is not None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = F.pad(attn_mask, (0, 1))
else:
assert bias_k is None
assert bias_v is None
# Reshape q, k, v for multihead attention and make em batch first.
q = \
q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
k = \
k.contiguous().view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
v = \
v.contiguous().view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
# Update source sequence length after adjustments.
src_len = k.size(1)
# Convert mask to float.
if attn_mask is not None and attn_mask.dtype == torch.bool:
new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
new_attn_mask.masked_fill_(attn_mask, -1e10)
attn_mask = new_attn_mask
# Adjust dropout_rate probability.
if not training:
dropout_rate = 0.0
# Calculate attention and out projection.
attn_output = torch.nn.functional.scaled_dot_product_attention(
q, k, v, attn_mask, dropout_rate)
attn_output = attn_output.transpose(0, 1).contiguous().view(
tgt_len * bsz, embed_dim)
attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
if need_weights:
q_scaled = q / math.sqrt(q.shape[-1])
if attn_mask is not None:
attn_output_weights = torch.baddbmm(attn_mask,
q_scaled,
k.transpose(-2, -1))
else:
attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
# Optionally average attention weights over heads.
attn_output_weights = attn_output_weights.view(bsz,
num_heads,
tgt_len,
src_len)
if average_attn_weights:
attn_output_weights = attn_output_weights.sum(dim=1) / num_heads
return attn_output, attn_output_weights, cache
else:
return attn_output, None, cache
|
"""WMT workload implemented in PyTorch."""
import contextlib
from typing import Any, Dict, Optional, Tuple
from absl import logging
import jax
import tensorflow as tf
import torch
import torch.distributed as dist
from torch.nn import DataParallel as DP
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.wmt import bleu
from algorithmic_efficiency.workloads.wmt.wmt_pytorch import decode
from algorithmic_efficiency.workloads.wmt.wmt_pytorch.models import Transformer
from algorithmic_efficiency.workloads.wmt.workload import BaseWmtWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
class WmtWorkload(BaseWmtWorkload):
"""WMT PyTorch workload."""
def compute_weighted_cross_entropy(
self,
logits: spec.Tensor,
targets: spec.Tensor,
weights: Optional[spec.Tensor] = None,
label_smoothing: float = 0.1) -> Dict[str, spec.Tensor]: # differentiable
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and off
values.
Returns:
{'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(f'Incorrect shapes. Got shape {logits.shape} logits and '
f'{targets.shape} targets.')
loss_fn = torch.nn.CrossEntropyLoss(
reduction='none', label_smoothing=label_smoothing)
if N_GPUS > 1 and not USE_PYTORCH_DDP:
loss_fn = DP(loss_fn)
# PyTorch loss functions expect the class dim directly after the batch dim.
per_example_losses = loss_fn(logits.transpose(-2, -1), targets)
if weights is None:
weights = torch.ones_like(targets)
per_example_losses = torch.where(
weights.to(torch.bool), per_example_losses, 0.)
summed_loss = per_example_losses.sum()
n_valid_examples = weights.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
# Primary eval / decode step functions.
# ----------------------------------------------------------------------------
@torch.no_grad()
def predict_step(self,
inputs: spec.Tensor,
params: spec.ParameterContainer,
eos_id: int,
max_decode_len: int,
beam_size: int = 4) -> spec.Tensor:
"""Predict translation with fast decoding beam search on a batch."""
params = params.module if isinstance(params, (DP, DDP)) else params
params.eval()
encoder = params.encoder
if N_GPUS > 1 and not USE_PYTORCH_DDP:
encoder = DP(encoder)
encoded_inputs = torch.repeat_interleave(
encoder(inputs), repeats=beam_size, dim=0)
raw_inputs = torch.repeat_interleave(inputs, repeats=beam_size, dim=0)
decoder = params.decoder
if N_GPUS > 1 and not USE_PYTORCH_DDP:
decoder = DP(decoder)
def tokens_ids_to_logits(
flat_ids: spec.Tensor, flat_cache: Dict[str, spec.Tensor]
) -> Tuple[spec.Tensor, Dict[str, spec.Tensor]]:
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_flat_cache = decoder(
flat_ids,
encoded_inputs,
raw_inputs,
decode=True,
max_len=max_decode_len,
cache=flat_cache)
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(dim=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
None,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
eos_id=eos_id,
max_decode_len=max_decode_len)
# Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension
# sorted in increasing order of log-probability.
# Return the highest scoring beam sequence, drop first dummy 0 token.
return beam_seqs[:, -1, 1:]
def translate_and_calculate_bleu(self,
params: spec.ParameterContainer,
ds_iter: tf.data.Dataset,
num_batches: int,
max_predict_length: int):
"""Translates the `ds_iter` and calculates the BLEU score."""
logging.info('Translating evaluation dataset.')
references, predictions = [], []
for _ in range(num_batches):
pred_batch = next(ds_iter)
inputs = pred_batch['inputs']
targets = pred_batch['targets']
predicted = self.predict_step(inputs,
params,
decode.EOS_ID,
max_predict_length)
# Find actual batch size, ignoring the potential padding.
weights = pred_batch.get('weights')
if weights is not None:
actual_batch_size = weights.sum(0)[0].item()
else:
actual_batch_size = len(predicted)
# Iterate through non-padding examples of batch.
for idx in range(actual_batch_size):
references.append(self._decode_tokens(targets[idx]))
predictions.append(self._decode_tokens(predicted[idx]))
# Calculate BLEU score for translated eval corpus against reference.
bleu_score = bleu.corpus_bleu(predictions, [references]).score
return bleu_score
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""aux_dropout_rate is used as attention_dropout_rate."""
torch.random.manual_seed(rng[0])
model = Transformer(
dropout_rate=dropout_rate, attention_dropout_rate=aux_dropout_rate)
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = DP(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'shared_embedding.weight'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
del update_batch_norm
model = params
if mode == spec.ForwardPassMode.EVAL:
model.eval()
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits_batch = model(
src=augmented_and_preprocessed_input_batch['inputs'],
tgt=augmented_and_preprocessed_input_batch['targets'],
inputs_positions=augmented_and_preprocessed_input_batch.get(
'inputs_position', None),
targets_positions=augmented_and_preprocessed_input_batch.get(
'targets_position', None),
inputs_segmentation=augmented_and_preprocessed_input_batch.get(
'inputs_segmentation', None),
targets_segmentation=augmented_and_preprocessed_input_batch.get(
'targets_segmentation', None))
return logits_batch, None
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False):
per_device_batch_size = int(global_batch_size / N_GPUS)
n_inputs = 7 if split == 'train' else 3
# The input pipeline has to be created in all processes, because
# self._tokenizer has to be available in every process.
np_iter = super()._build_input_queue(data_rng,
split,
data_dir,
global_batch_size,
num_batches,
repeat_final_dataset)
# We only need np_iter in one Python process.
if RANK != 0:
del np_iter
while True:
# Only iterate over tf input pipeline in one Python process to
# avoid creating too many threads.
if RANK == 0:
batch = next(np_iter) # pylint: disable=stop-iteration-return
tensor_list = []
for key, value in batch.items():
tensor = torch.as_tensor(value, dtype=torch.int64, device=DEVICE)
tensor_list.append(tensor)
batch[key] = (
tensor[0] if USE_PYTORCH_DDP else tensor.view(
-1, value.shape[-1]))
# Send batch to other devices when using DDP.
if USE_PYTORCH_DDP:
# During eval, the batch size of the remainder might be different.
if split != 'train':
per_device_batch_size = torch.tensor(
len(batch['inputs']), dtype=torch.int32, device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
# We don't need to broadcast the batch for the device with RANK == 0.
dist.broadcast(torch.stack(tensor_list)[:, 1:].contiguous(), src=0)
else:
batch = {}
# During eval, the batch size of the remainder might be different.
if split != 'train':
per_device_batch_size = torch.empty((1,),
dtype=torch.int32,
device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
# N_GPUS - 1 since we don't broadcast the batch for RANK == 0.
tensor = torch.empty((n_inputs, N_GPUS - 1, per_device_batch_size, 256),
dtype=torch.int64,
device=DEVICE)
dist.broadcast(tensor, src=0)
# Note that the order of the keys is important.
if split == 'train':
keys = [
'inputs',
'inputs_position',
'inputs_segmentation',
'targets',
'targets_position',
'targets_segmentation',
'weights',
]
# For all eval/test splits.
else:
keys = ['inputs', 'targets', 'weights']
for key, n in zip(keys, range(n_inputs)):
# RANK - 1 since we don't broadcast the batch for RANK == 0.
batch[key] = tensor[n][RANK - 1]
yield batch
def eval_step(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor]) -> Dict[str, spec.Tensor]:
"""Calculate evaluation metrics on a batch."""
targets = batch['targets']
weights = batch['weights']
logits, _ = self.model_fn(
params,
batch,
mode=spec.ForwardPassMode.EVAL,
model_state=None,
rng=None,
update_batch_norm=False)
summed_loss = self.compute_weighted_cross_entropy(logits,
targets,
weights,
0.0)['summed']
acc_sum, weight_sum = self.compute_weighted_accuracy(
logits, targets, weights)
return {
'loss': summed_loss,
'accuracy': acc_sum,
'denominator': weight_sum,
}
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
del num_examples
if USE_PYTORCH_DDP:
for metric in total_metrics.values():
dist.all_reduce(metric)
total_metrics = {k: v.item() for k, v in total_metrics.items()}
eval_denominator = total_metrics.pop('denominator')
return jax.tree_map(lambda x: float(x / eval_denominator), total_metrics)
|
"""Fast decoding routines for inference from a trained model.
PyTorch port of https://github.com/google/flax/tree/main/examples/wmt.
"""
from dataclasses import dataclass
from typing import Any, Callable, Dict, Optional, Tuple, Union
import jax
import torch
import torch.nn.functional as F
from algorithmic_efficiency.pytorch_utils import pytorch_setup
DEVICE = pytorch_setup()[2]
# Constants
# We assume the default End-of-Sentence token id is 2 (SentencePiece).
EOS_ID = 2
# "Effective negative infinity" constant for masking in beam search.
NEG_INF = torch.tensor(-1.0e7, device=DEVICE)
def brevity_penalty(alpha: float, length: Union[int,
torch.Tensor]) -> torch.Tensor:
"""Brevity penalty function for beam search penalizing short sequences.
Args:
alpha: float: brevity-penalty scaling parameter.
length: int or scalar int tensor: length of considered sequence.
Returns:
    Brevity penalty score as a scalar torch tensor.
"""
if not isinstance(length, torch.Tensor):
length = torch.tensor(length, device=DEVICE)
return torch.pow((5.0 + length) / 6.0, alpha)
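# Worked example (illustrative): with alpha=0.6 (the value used in
# predict_step), brevity_penalty(0.6, 10) = ((5 + 10) / 6) ** 0.6, which is
# approximately 1.73; it is used to length-normalize beam scores so that short
# hypotheses are not unfairly favored.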
# Beam handling utility functions:
def add_beam_dim(x: torch.Tensor, beam_size: int) -> torch.Tensor:
"""Creates new beam dimension in non-scalar array and tiles into it."""
if x.dim() < 2: # ignore scalars (e.g. cache index)
return x
x = x.unsqueeze(dim=1)
tile_dims = [1] * x.dim()
tile_dims[1] = beam_size
return torch.tile(x, tile_dims)
def flatten_beam_dim(x: torch.Tensor) -> torch.Tensor:
"""Flattens the first two dimensions of a non-scalar array."""
if x.dim() < 2: # ignore scalars (e.g. cache index)
return x
return x.view(-1, *x.shape[2:])
def unflatten_beam_dim(x: torch.Tensor, batch_size: int,
beam_size: int) -> torch.Tensor:
"""Unflattens the first, flat batch*beam dimension of a non-scalar tensor."""
if x.dim() < 2: # ignore scalars (e.g. cache index)
return x
assert batch_size * beam_size == x.shape[0]
return x.view(batch_size, beam_size, *x.shape[1:])
def flat_batch_beam_expand(x: torch.Tensor, beam_size: int) -> torch.Tensor:
"""Expands the each batch item by beam_size in batch_dimension."""
return flatten_beam_dim(add_beam_dim(x, beam_size))
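# Shape sanity-check sketch for the helpers above (illustration only; the
# concrete batch/length/beam values are arbitrary and nothing else calls this):
def _beam_dim_shape_check(batch: int = 2,
                          length: int = 3,
                          beam: int = 4) -> None:
  """Round-trips the beam-dimension helpers; never called by the module."""
  x = torch.zeros((batch, length), device=DEVICE)
  expanded = flat_batch_beam_expand(x, beam)  # --> [batch * beam, length]
  assert expanded.shape == (batch * beam, length)
  restored = unflatten_beam_dim(expanded, batch, beam)  # [batch, beam, length]
  assert restored.shape == (batch, beam, length)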
def gather_beams(nested: Dict[str, Any],
beam_indices: torch.Tensor,
batch_size: int,
new_beam_size: int) -> Dict[str, Any]:
"""Gathers the beam slices indexed by beam_indices into new beam tensor.
Args:
nested: Dict of (dicts of) tensors.
beam_indices: tensor of beam_indices.
batch_size: int: size of batch.
new_beam_size: int: size of _new_ beam dimension.
Returns:
New dict with new beam tensors.
[batch_size, old_beam_size, ...] --> [batch_size, new_beam_size, ...]
"""
batch_indices = torch.reshape(
torch.div(
torch.arange(batch_size * new_beam_size, device=DEVICE),
new_beam_size,
rounding_mode='floor'), (batch_size, new_beam_size))
def gather_fn(x):
if x.dim() < 2: # ignore scalars (e.g. cache index)
return x
return x[batch_indices, beam_indices]
return jax.tree_map(gather_fn, nested)
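# Indexing sketch (illustration only): with batch_size=2 and new_beam_size=2,
# batch_indices above is [[0, 0], [1, 1]], so x[batch_indices, beam_indices]
# selects, per batch item, the beams named by the matching row of beam_indices
# while keeping all trailing dimensions intact.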
def gather_topk_beams(nested: Dict[str, Any],
score_or_log_prob: torch.Tensor,
batch_size: int,
new_beam_size: int) -> Dict[str, Any]:
"""Gathers the top-k beam slices given by score_or_log_prob array.
Args:
nested: Dict of (dicts of) tensors.
score_or_log_prob: [batch_size, old_beam_size] tensor of values to sort by
for top-k selection of beam slices.
batch_size: int: size of batch.
new_beam_size: int: size of _new_ top-k selected beam dimension
Returns:
New dict with new beam tensors containing top k new_beam_size slices.
[batch_size, old_beam_size, ...] --> [batch_size, new_beam_size, ...]
"""
_, topk_indices = torch.topk(score_or_log_prob, k=new_beam_size)
topk_indices = torch.flip(topk_indices, (1,))
return gather_beams(nested, topk_indices, batch_size, new_beam_size)
# Beam search state:
@dataclass
class BeamState:
"""Holds beam search state data."""
# The position of the decoding loop in the length dimension.
cur_index: torch.Tensor # scalar int32: current decoded length index.
# The active sequence log probabilities and finished sequence scores.
live_logprobs: torch.Tensor # float32: [batch_size, beam_size]
finished_scores: torch.Tensor # float32: [batch_size, beam_size]
# The current active-beam-searching and finished sequences.
live_seqs: torch.Tensor # int32: [batch_size, beam_size, max_decode_len]
finished_seqs: torch.Tensor # int32: [batch_size, beam_size, max_decode_len]
# Records which of the 'finished_seqs' is occupied and not a filler slot.
finished_flags: torch.Tensor # bool: [batch_size, beam_size]
# The current state of the autoregressive decoding caches.
cache: Dict[str, Any] # Any dict (of dicts), with torch.Tensors as leafs.
def beam_init(batch_size: int,
beam_size: int,
max_decode_len: int,
cache: Dict[str, Any]) -> BeamState:
"""Initializes the beam search state data structure."""
cur_index0 = torch.tensor(0, device=DEVICE)
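  # Only the first beam of each batch item starts "live" (logprob 0); the rest
  # are set to NEG_INF so the first expansion step does not select duplicates.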
live_logprobs0 = torch.tile(
torch.tensor([0.0] + [NEG_INF] * (beam_size - 1), device=DEVICE),
[batch_size, 1])
finished_scores0 = (
torch.ones((batch_size, beam_size), device=DEVICE) * NEG_INF)
live_seqs0 = torch.zeros((batch_size, beam_size, max_decode_len),
dtype=torch.int32,
device=DEVICE)
finished_seqs0 = torch.zeros((batch_size, beam_size, max_decode_len),
dtype=torch.int32,
device=DEVICE)
finished_flags0 = torch.zeros((batch_size, beam_size),
dtype=torch.bool,
device=DEVICE)
# add beam dimension to attention cache pytree elements
beam_cache0 = jax.tree_map(lambda x: add_beam_dim(x, beam_size), cache)
return BeamState(
cur_index=cur_index0,
live_logprobs=live_logprobs0,
finished_scores=finished_scores0,
live_seqs=live_seqs0,
finished_seqs=finished_seqs0,
finished_flags=finished_flags0,
cache=beam_cache0)
# Beam search routine:
def beam_search(
inputs: torch.Tensor,
cache: Optional[Dict[str, Any]],
tokens_to_logits: Callable,
beam_size: int = 4,
alpha: float = 0.6,
eos_id: int = EOS_ID,
max_decode_len: Optional[int] = None) -> Tuple[torch.Tensor, torch.Tensor]:
"""Beam search for transformer machine translation.
Args:
inputs: torch.Tensor: [batch_size, length] int32 sequence of tokens.
cache: Dict of (dicts of) tensors.
tokens_to_logits: fast autoregressive decoder function taking single token
slices and cache and returning next-token logits and updated cache.
beam_size: int: number of beams to use in beam search.
alpha: float: scaling factor for brevity penalty.
eos_id: int: id of end-of-sentence token for target vocabulary.
max_decode_len: int: maximum length of decoded translations.
Returns:
Tuple of:
[batch_size, beam_size, max_decode_len] top-scoring sequences
[batch_size, beam_size] beam-search scores.
"""
# We liberally annotate shape information for clarity below.
batch_size = inputs.shape[0]
if max_decode_len is None:
max_decode_len = inputs.shape[1]
end_marker = torch.tensor(eos_id, device=DEVICE)
# initialize beam search state
beam_search_init_state = beam_init(batch_size,
beam_size,
max_decode_len,
cache)
def beam_search_loop_cond_fn(state: BeamState) -> bool:
"""Beam search loop termination condition."""
# Have we reached max decoding length?
not_at_end = state.cur_index < max_decode_len - 1
# Is no further progress in the beam search possible?
# Get the best possible scores from alive sequences.
min_brevity_penalty = brevity_penalty(alpha, max_decode_len)
best_live_scores = state.live_logprobs[:, -1:] / min_brevity_penalty
# Get the worst scores from finished sequences.
worst_finished_scores, _ = torch.min(
state.finished_scores, dim=1, keepdim=True)
# Mask out scores from slots without any actual finished sequences.
worst_finished_scores = torch.where(state.finished_flags,
worst_finished_scores,
NEG_INF)
# If no best possible live score is better than current worst finished
# scores, the search cannot improve the finished set further.
search_terminated = torch.all(worst_finished_scores > best_live_scores)
# If we're not at the max decode length, and the search hasn't terminated,
# continue looping.
return not_at_end & (~search_terminated)
def beam_search_loop_body_fn(state: BeamState) -> BeamState:
"""Beam search loop state update function."""
# Collect the current position slice along length to feed the fast
# autoregressive decoder model. Flatten the beam dimension into batch
# dimension for feeding into the model.
# --> [batch * beam, 1]
cur_index = state.cur_index
flat_ids = flatten_beam_dim(
state.live_seqs[:batch_size, :beam_size, cur_index:cur_index + 1])
# Flatten beam dimension into batch to be compatible with model.
# {[batch, beam, ...], ...} --> {[batch * beam, ...], ...}
flat_cache = jax.tree_map(flatten_beam_dim, state.cache)
# Call fast-decoder model on current tokens to get next-position logits.
# --> [batch * beam, vocab]
flat_logits, new_flat_cache = tokens_to_logits(flat_ids, flat_cache)
# unflatten beam dimension
# [batch * beam, vocab] --> [batch, beam, vocab]
logits = unflatten_beam_dim(flat_logits, batch_size, beam_size)
# Unflatten beam dimension in attention cache arrays
# {[batch * beam, ...], ...} --> {[batch, beam, ...], ...}
new_cache = jax.tree_map(
lambda x: unflatten_beam_dim(x, batch_size, beam_size), new_flat_cache)
# Gather log probabilities from logits
candidate_log_probs = F.log_softmax(logits, dim=-1)
# Add new logprobs to existing prefix logprobs.
# --> [batch, beam, vocab]
log_probs = candidate_log_probs + state.live_logprobs.unsqueeze(dim=2)
# We'll need the vocab size, gather it from the log probability dimension.
vocab_size = log_probs.shape[2]
# Each item in batch has beam_size * vocab_size candidate sequences.
# For each item, get the top 2*k candidates with the highest log-
# probabilities. We gather the top 2*K beams here so that even if the best
# K sequences reach EOS simultaneously, we have another K sequences
# remaining to continue the live beam search.
beams_to_keep = 2 * beam_size
# Flatten beam and vocab dimensions.
flat_log_probs = log_probs.view(batch_size, beam_size * vocab_size)
# Gather the top 2*K scores from _all_ beams.
# --> [batch, 2*beams], [batch, 2*beams]
topk_log_probs, topk_indices = torch.topk(flat_log_probs, k=beams_to_keep)
# Recover the beam index by floor division.
topk_beam_indices = torch.div(
topk_indices, vocab_size, rounding_mode='floor')
# Gather 2*k top beams.
# --> [batch, 2*beams, length]
topk_seq = gather_beams(state.live_seqs,
topk_beam_indices,
batch_size,
beams_to_keep)
# Append the most probable 2*K token IDs to the top 2*K sequences
    # Recover the token id by modulo division and expand the id array for
    # broadcasting.
# --> [batch, 2*beams, 1]
topk_ids = torch.unsqueeze(topk_indices % vocab_size, dim=2)
# Update sequences for the 2*K top-k new sequences.
# --> [batch, 2*beams, length]
topk_seq[:, :, cur_index + 1:] = topk_ids
# Update LIVE (in-progress) sequences:
# Did any of these sequences reach an end marker?
# --> [batch, 2*beams]
newly_finished = (topk_seq[:, :, cur_index + 1] == end_marker)
# To prevent these newly finished sequences from being added to the LIVE
# set of active beam search sequences, set their log probs to a very large
# negative value.
new_log_probs = topk_log_probs + newly_finished * NEG_INF
# Determine the top k beam indices (from top 2*k beams) from log probs.
# --> [batch, beams]
_, new_topk_indices = torch.topk(new_log_probs, k=beam_size)
new_topk_indices = torch.flip(new_topk_indices, (1,))
# Gather the top k beams (from top 2*k beams).
# --> [batch, beams, length], [batch, beams]
top_alive_seq, top_alive_log_probs = gather_beams([topk_seq, new_log_probs],
new_topk_indices,
batch_size, beam_size)
# Determine the top k beam indices from the original set of all beams.
# --> [batch, beams]
top_alive_indices = gather_beams(topk_beam_indices,
new_topk_indices,
batch_size,
beam_size)
# With these, gather the top k beam-associated caches.
# --> {[batch, beams, ...], ...}
top_alive_cache = gather_beams(new_cache,
top_alive_indices,
batch_size,
beam_size)
# Update FINISHED (reached end of sentence) sequences:
# Calculate new seq scores from log probabilities.
new_scores = topk_log_probs / brevity_penalty(alpha, cur_index + 1)
# Mask out the still unfinished sequences by adding large negative value.
# --> [batch, 2*beams]
new_scores += (~newly_finished) * NEG_INF
# Combine sequences, scores, and flags along the beam dimension and compare
# new finished sequence scores to existing finished scores and select the
# best from the new set of beams.
finished_seqs = torch.cat( # --> [batch, 3*beams, length]
[state.finished_seqs, topk_seq], dim=1)
finished_scores = torch.cat( # --> [batch, 3*beams]
[state.finished_scores, new_scores], dim=1)
finished_flags = torch.cat( # --> [batch, 3*beams]
[state.finished_flags, newly_finished], dim=1)
# --> [batch, beams, length], [batch, beams], [batch, beams]
top_finished_seq, top_finished_scores, top_finished_flags = (
gather_topk_beams([finished_seqs, finished_scores, finished_flags],
finished_scores, batch_size, beam_size))
return BeamState(
cur_index=cur_index + 1,
live_logprobs=top_alive_log_probs,
finished_scores=top_finished_scores,
live_seqs=top_alive_seq,
finished_seqs=top_finished_seq,
finished_flags=top_finished_flags,
cache=top_alive_cache)
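  # Run the decoding loop eagerly in Python, advancing every live beam by one
  # token per iteration (the JAX version of this module uses lax.while_loop).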
state = beam_search_init_state
while beam_search_loop_cond_fn(state):
state = beam_search_loop_body_fn(state)
final_state = state
# Account for the edge-case where there are no finished sequences for a
# particular batch item. If so, return live sequences for that batch item.
# --> [batch]
none_finished = torch.any(final_state.finished_flags, dim=1)
# --> [batch, beams, length]
finished_seqs = torch.where(none_finished[:, None, None],
final_state.finished_seqs,
final_state.live_seqs)
# --> [batch, beams]
finished_scores = torch.where(none_finished[:, None],
final_state.finished_scores,
final_state.live_logprobs)
return finished_seqs, finished_scores
|
"""Transformer-based machine translation model.
Reference https://github.com/google/flax/tree/main/examples/wmt.
"""
from typing import Any, Callable, Optional
from flax import linen as nn
from flax import struct
from jax import lax
import jax.numpy as jnp
import numpy as np
@struct.dataclass
class TransformerConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
share_embeddings: bool = True
dtype: Any = jnp.float32
vocab_size: int = 32000
emb_dim: int = 1024
num_heads: int = 16
num_layers: int = 6
qkv_dim: int = 1024
mlp_dim: int = 1024
max_len: int = 256
  # If None, defaults to 0.1.
  dropout_rate: Optional[float] = 0.1
  # If None, defaults to 0.1.
  attention_dropout_rate: Optional[float] = 0.1
deterministic: bool = False
decode: bool = False
kernel_init: Callable = nn.initializers.xavier_uniform()
bias_init: Callable = nn.initializers.normal(stddev=1e-6)
posemb_init: Optional[Callable] = None
def shift_right(x, axis=1):
"""Shift the input to the right by padding on axis 1."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (1, 0)
padded = jnp.pad(
x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
return padded[:, :-1]
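# Illustrative example (not part of the original module): shift_right prepends
# a zero along the time axis and drops the last position, the usual
# teacher-forcing shift applied to decoder targets, e.g.
#   shift_right(jnp.array([[3, 5, 7]]))  # --> [[0, 3, 5]]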
def sinusoidal_init(max_len=2048, min_scale=1.0, max_scale=10000.0):
"""1D Sinusoidal Position Embedding Initializer.
Args:
max_len: maximum possible length for the input.
min_scale: float: minimum frequency-scale in sine grating.
max_scale: float: maximum frequency-scale in sine grating.
Returns:
output: init function returning `(1, max_len, d_feature)`
"""
def init(key, shape, dtype=np.float32):
"""Sinusoidal init."""
del key, dtype
d_feature = shape[-1]
pe = np.zeros((max_len, d_feature), dtype=np.float32)
position = np.arange(0, max_len)[:, np.newaxis]
scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)
div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)
pe[:, :d_feature // 2] = np.sin(position * div_term)
pe[:, d_feature // 2:2 * (d_feature // 2)] = np.cos(position * div_term)
pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]
return jnp.array(pe)
return init
class AddPositionEmbs(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
decode: whether to run in single-position autoregressive mode.
"""
config: TransformerConfig
decode: bool = False
@nn.compact
def __call__(self, inputs, inputs_positions=None):
"""Applies AddPositionEmbs module.
By default this layer uses a fixed sinusoidal embedding table. If a
learned position embedding is desired, pass an initializer to
posemb_init in the configuration.
Args:
inputs: input data.
inputs_positions: input position indices for packed sequences.
Returns:
output: `(bs, timesteps, in_dim)`
"""
cfg = self.config
# inputs.shape is (batch_size, seq_len, emb_dim)
assert inputs.ndim == 3, ('Number of dimensions should be 3,'
f' but it is: {inputs.ndim}')
length = inputs.shape[1]
pos_emb_shape = (1, cfg.max_len, inputs.shape[-1])
if cfg.posemb_init is None:
# Use a fixed (non-learned) sinusoidal position embedding.
pos_embedding = sinusoidal_init(max_len=cfg.max_len)(None,
pos_emb_shape,
None)
else:
pos_embedding = self.param('pos_embedding',
cfg.posemb_init,
pos_emb_shape)
pe = pos_embedding[:, :length, :]
# We use a cache position index for tracking decoding position.
if self.decode:
is_initialized = self.has_variable('cache', 'cache_index')
cache_index = self.variable('cache',
'cache_index',
lambda: jnp.array(0, dtype=jnp.uint32))
if is_initialized:
i = cache_index.value
cache_index.value = i + 1
_, _, df = pos_embedding.shape
pe = lax.dynamic_slice(pos_embedding, jnp.array((0, i, 0)), (1, 1, df))
if inputs_positions is None:
# normal unpacked case:
return inputs + pe
else:
# for packed data we need to use known position indices:
return inputs + jnp.take(pe[0], inputs_positions, axis=0)
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
out_dim: optionally specify out dimension.
"""
config: TransformerConfig
out_dim: Optional[int] = None
@nn.compact
def __call__(self, inputs):
"""Applies Transformer MlpBlock module."""
cfg = self.config
actual_out_dim = (
inputs.shape[-1] if self.out_dim is None else self.out_dim)
x = nn.Dense(
cfg.mlp_dim,
dtype=cfg.dtype,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)(
inputs)
x = nn.relu(x)
if cfg.dropout_rate is None:
dropout_rate = 0.1
else:
dropout_rate = cfg.dropout_rate
x = nn.Dropout(rate=dropout_rate)(x, deterministic=cfg.deterministic)
output = nn.Dense(
actual_out_dim,
dtype=cfg.dtype,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init)(
x)
output = nn.Dropout(rate=dropout_rate)(
output, deterministic=cfg.deterministic)
return output
class Encoder1DBlock(nn.Module):
"""Transformer encoder layer.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
def __call__(self, inputs, encoder_mask=None):
"""Applies Encoder1DBlock module.
Args:
inputs: input data.
encoder_mask: encoder self-attention mask.
Returns:
output after transformer encoder block.
"""
cfg = self.config
# Attention block.
assert inputs.ndim == 3
x = nn.LayerNorm(dtype=cfg.dtype)(inputs)
if cfg.attention_dropout_rate is None:
attention_dropout_rate = 0.1
else:
attention_dropout_rate = cfg.attention_dropout_rate
x = nn.SelfAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
qkv_features=cfg.qkv_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=attention_dropout_rate,
deterministic=cfg.deterministic)(x, encoder_mask)
if cfg.dropout_rate is None:
dropout_rate = 0.1
else:
dropout_rate = cfg.dropout_rate
x = nn.Dropout(rate=dropout_rate)(x, deterministic=cfg.deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(dtype=cfg.dtype)(x)
y = MlpBlock(config=cfg)(y)
return x + y
class EncoderDecoder1DBlock(nn.Module):
"""Transformer encoder-decoder layer.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
def __call__(self,
targets,
encoded,
decoder_mask=None,
encoder_decoder_mask=None):
"""Applies EncoderDecoder1DBlock module.
Args:
targets: input data for decoder
encoded: input data from encoder
decoder_mask: decoder self-attention mask.
encoder_decoder_mask: encoder-decoder attention mask.
Returns:
output after transformer encoder-decoder block.
"""
cfg = self.config
# Decoder block.
assert targets.ndim == 3
x = nn.LayerNorm(dtype=cfg.dtype)(targets)
if cfg.attention_dropout_rate is None:
attention_dropout_rate = 0.1
else:
attention_dropout_rate = cfg.attention_dropout_rate
x = nn.SelfAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
qkv_features=cfg.qkv_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=attention_dropout_rate,
deterministic=cfg.deterministic,
decode=cfg.decode)(x, decoder_mask)
if cfg.dropout_rate is None:
dropout_rate = 0.1
else:
dropout_rate = cfg.dropout_rate
x = nn.Dropout(rate=dropout_rate)(x, deterministic=cfg.deterministic)
x = x + targets
# Encoder-Decoder block.
y = nn.LayerNorm(dtype=cfg.dtype)(x)
y = nn.MultiHeadDotProductAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
qkv_features=cfg.qkv_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
use_bias=False,
broadcast_dropout=False,
dropout_rate=attention_dropout_rate,
deterministic=cfg.deterministic)(y, encoded, encoder_decoder_mask)
y = nn.Dropout(rate=dropout_rate)(y, deterministic=cfg.deterministic)
y = y + x
# MLP block.
z = nn.LayerNorm(dtype=cfg.dtype)(y)
z = MlpBlock(config=cfg)(z)
return y + z
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
shared_embedding: a shared embedding layer to use.
"""
config: TransformerConfig
shared_embedding: Any = None
@nn.compact
def __call__(self, inputs, inputs_positions=None, encoder_mask=None):
"""Applies Transformer model on the inputs.
Args:
inputs: input data
inputs_positions: input subsequence positions for packed examples.
      encoder_mask: encoder self-attention mask.
Returns:
output of a transformer encoder.
"""
cfg = self.config
assert inputs.ndim == 2 # (batch, len)
# Input Embedding
if self.shared_embedding is None:
input_embed = nn.Embed(
num_embeddings=cfg.vocab_size,
features=cfg.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = self.shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
x = AddPositionEmbs(
config=cfg, decode=False, name='posembed_input')(
x, inputs_positions=inputs_positions)
if cfg.dropout_rate is None:
dropout_rate = 0.1
else:
dropout_rate = cfg.dropout_rate
x = nn.Dropout(rate=dropout_rate)(x, deterministic=cfg.deterministic)
x = x.astype(cfg.dtype)
# Input Encoder
for lyr in range(cfg.num_layers):
x = Encoder1DBlock(
config=cfg, name=f'encoderblock_{lyr}')(x, encoder_mask)
encoded = nn.LayerNorm(dtype=cfg.dtype, name='encoder_layernorm')(x)
return encoded
class Decoder(nn.Module):
"""Transformer Model Decoder for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
shared_embedding: a shared embedding layer to use.
"""
config: TransformerConfig
shared_embedding: Any = None
@nn.compact
def __call__(self,
encoded,
targets,
targets_positions=None,
decoder_mask=None,
encoder_decoder_mask=None):
"""Applies Transformer model on the inputs.
Args:
encoded: encoded input data from encoder.
targets: target inputs.
targets_positions: input subsequence positions for packed examples.
decoder_mask: decoder self-attention mask.
encoder_decoder_mask: encoder-decoder attention mask.
Returns:
output of a transformer decoder.
"""
cfg = self.config
assert encoded.ndim == 3 # (batch, len, depth)
assert targets.ndim == 2 # (batch, len)
# Target Embedding
if self.shared_embedding is None:
output_embed = nn.Embed(
num_embeddings=cfg.vocab_size,
features=cfg.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
output_embed = self.shared_embedding
y = targets.astype('int32')
if not cfg.decode:
y = shift_right(y)
y = output_embed(y)
y = AddPositionEmbs(
config=cfg, decode=cfg.decode, name='posembed_output')(
y, inputs_positions=targets_positions)
if cfg.dropout_rate is None:
dropout_rate = 0.1
else:
dropout_rate = cfg.dropout_rate
y = nn.Dropout(rate=dropout_rate)(y, deterministic=cfg.deterministic)
y = y.astype(cfg.dtype)
# Target-Input Decoder
for lyr in range(cfg.num_layers):
y = EncoderDecoder1DBlock(
config=cfg, name=f'encoderdecoderblock_{lyr}')(
y,
encoded,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask)
y = nn.LayerNorm(dtype=cfg.dtype, name='encoderdecoder_layernorm')(y)
# Use the transpose of embedding matrix for logit transform.
logits = output_embed.attend(y.astype(jnp.float32))
# Correctly normalize pre-softmax logits for this shared case.
logits = logits / jnp.sqrt(y.shape[-1])
return logits
class Transformer(nn.Module):
"""Transformer Model for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
def setup(self):
cfg = self.config
if cfg.share_embeddings:
      # A single `vocab_size` is shared between encoder and decoder, so there
      # is no separate output vocab size to validate here (the upstream Flax
      # example asserts output_vocab_size == vocab_size instead).
self.shared_embedding = nn.Embed(
num_embeddings=cfg.vocab_size,
features=cfg.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
self.shared_embedding = None
self.encoder = Encoder(config=cfg, shared_embedding=self.shared_embedding)
self.decoder = Decoder(config=cfg, shared_embedding=self.shared_embedding)
def encode(self, inputs, inputs_positions=None, inputs_segmentation=None):
"""Applies Transformer encoder-branch on the inputs.
Args:
inputs: input data.
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
Returns:
encoded feature array from the transformer encoder.
"""
cfg = self.config
# Make padding attention mask.
encoder_mask = nn.make_attention_mask(
inputs > 0, inputs > 0, dtype=cfg.dtype)
# Add segmentation block-diagonal attention mask if using segmented data.
if inputs_segmentation is not None:
encoder_mask = nn.combine_masks(
encoder_mask,
nn.make_attention_mask(
inputs_segmentation,
inputs_segmentation,
jnp.equal,
dtype=cfg.dtype))
return self.encoder(
inputs, inputs_positions=inputs_positions, encoder_mask=encoder_mask)
def decode(
self,
encoded,
inputs, # only needed for masks
targets,
targets_positions=None,
inputs_segmentation=None,
targets_segmentation=None):
"""Applies Transformer decoder-branch on encoded-input and target.
Args:
encoded: encoded input data from encoder.
inputs: input data (only needed for masking).
targets: target data.
targets_positions: target subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
targets_segmentation: target segmentation info for packed examples.
Returns:
logits array from transformer decoder.
"""
cfg = self.config
# Make padding attention masks.
if cfg.decode:
decoder_mask = None
encoder_decoder_mask = nn.make_attention_mask(
jnp.ones_like(targets) > 0, inputs > 0, dtype=cfg.dtype)
else:
decoder_mask = nn.combine_masks(
nn.make_attention_mask(targets > 0, targets > 0, dtype=cfg.dtype),
nn.make_causal_mask(targets, dtype=cfg.dtype))
encoder_decoder_mask = nn.make_attention_mask(
targets > 0, inputs > 0, dtype=cfg.dtype)
# Add segmentation block-diagonal attention masks if using segmented data.
if inputs_segmentation is not None:
decoder_mask = nn.combine_masks(
decoder_mask,
nn.make_attention_mask(
targets_segmentation,
targets_segmentation,
jnp.equal,
dtype=cfg.dtype))
encoder_decoder_mask = nn.combine_masks(
encoder_decoder_mask,
nn.make_attention_mask(
targets_segmentation,
inputs_segmentation,
jnp.equal,
dtype=cfg.dtype))
logits = self.decoder(
encoded,
targets,
targets_positions=targets_positions,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask)
return logits.astype(self.config.dtype)
def __call__(self,
inputs,
targets,
inputs_positions=None,
targets_positions=None,
inputs_segmentation=None,
targets_segmentation=None):
"""Applies Transformer model on the inputs.
Args:
inputs: input data.
targets: target data.
inputs_positions: input subsequence positions for packed examples.
targets_positions: target subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
targets_segmentation: target segmentation info for packed examples.
Returns:
logits array from full transformer.
"""
encoded = self.encode(
inputs,
inputs_positions=inputs_positions,
inputs_segmentation=inputs_segmentation)
return self.decode(
encoded,
inputs, # only used for masks
targets,
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentation,
targets_segmentation=targets_segmentation)
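# Minimal usage sketch (mirrors how the Jax WMT workload below initializes and
# applies this model; batch size and sequence length are illustrative and it
# assumes `import jax`):
#   config = TransformerConfig(deterministic=True)
#   model = Transformer(config)
#   variables = model.init(jax.random.PRNGKey(0),
#                          jnp.ones((2, 256), jnp.float32),
#                          jnp.ones((2, 256), jnp.float32))
#   logits = model.apply(variables, inputs, targets)  # [batch, len, vocab_size]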
|
"""WMT workload implemented in Jax."""
import functools
from typing import Any, Dict, Iterator, Optional, Tuple
from absl import logging
from flax import jax_utils
from flax import linen as nn
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.wmt import bleu
from algorithmic_efficiency.workloads.wmt.wmt_jax import decode
from algorithmic_efficiency.workloads.wmt.wmt_jax import models
from algorithmic_efficiency.workloads.wmt.workload import BaseWmtWorkload
def _to_host(x: spec.Tensor) -> spec.Tensor:
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
class WmtWorkload(BaseWmtWorkload):
"""WMT Jax workload."""
def compute_weighted_cross_entropy(
self,
logits: spec.Tensor,
targets: spec.Tensor,
weights: Optional[spec.Tensor] = None,
label_smoothing: float = 0.1) -> Dict[str, spec.Tensor]: # differentiable
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and off
values.
Returns:
{'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(f'Incorrect shapes. Got shape {logits.shape} logits and '
f'{targets.shape} targets.')
smoothed_targets = optax.smooth_labels(
common_utils.onehot(targets, self._vocab_size), label_smoothing)
per_example_losses = -jnp.sum(
smoothed_targets * nn.log_softmax(logits), axis=-1)
if weights is None:
weights = jnp.ones_like(targets)
per_example_losses = jnp.where(weights, per_example_losses, 0.)
summed_loss = per_example_losses.sum()
n_valid_examples = weights.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
@functools.partial(
jax.pmap, axis_name='batch', static_broadcasted_argnums=(0,))
def eval_step_pmapped(
self, params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor]) -> Dict[str, spec.Tensor]:
"""Calculate evaluation metrics on a batch."""
inputs = batch['inputs']
targets = batch['targets']
weights = batch['weights']
logits = self._eval_model.apply({'params': params}, inputs, targets)
summed_loss = self.compute_weighted_cross_entropy(logits,
targets,
weights,
0.0)['summed']
acc_sum, weight_sum = self.compute_weighted_accuracy(
logits, targets, weights)
return {
'loss': summed_loss,
'accuracy': acc_sum,
'denominator': weight_sum,
}
def eval_step(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor]) -> Dict[str, spec.Tensor]:
replicated_eval_metrics = self.eval_step_pmapped(params, batch)
return jax.tree_map(lambda x: jnp.sum(x, axis=0), replicated_eval_metrics)
@functools.partial(
jax.pmap, axis_name='batch', static_broadcasted_argnums=(0,))
def initialize_cache(self,
inputs: spec.Tensor,
max_decode_len: int = 256) -> Dict[str, spec.Tensor]:
"""Initialize a cache for a given input shape and max decode length."""
config = models.TransformerConfig(deterministic=True, decode=True)
target_shape = (inputs.shape[0], max_decode_len) + inputs.shape[2:]
initial_variables = models.Transformer(config).init(
jax.random.PRNGKey(0),
jnp.ones(inputs.shape, jnp.float32),
jnp.ones(target_shape, jnp.float32))
return initial_variables['cache']
# eos_id, max_decode_len are constant.
@functools.partial(
jax.pmap, axis_name='batch', static_broadcasted_argnums=(0, 4, 5))
def predict_step(self,
inputs: spec.Tensor,
params: spec.ParameterContainer,
cache: Dict[str, spec.Tensor],
eos_id: int,
max_decode_len: int,
beam_size: int = 4) -> spec.Tensor:
"""Predict translation with fast decoding beam search on a batch."""
config = models.TransformerConfig(deterministic=True, decode=True)
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
# i.e. if we denote each batch element subtensor as el[n]:
# [el0, el1, el2] --> beamsize=2 --> [el0,el0,el1,el1,el2,el2]
encoded_inputs = decode.flat_batch_beam_expand(
models.Transformer(config).apply({'params': params},
inputs,
method=models.Transformer.encode),
beam_size)
raw_inputs = decode.flat_batch_beam_expand(inputs, beam_size)
def tokens_ids_to_logits(
flat_ids: spec.Tensor, flat_cache: Dict[str, spec.Tensor]
) -> Tuple[spec.Tensor, Dict[str, spec.Tensor]]:
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.Transformer(config).apply(
{
'params': params,
'cache': flat_cache,
},
encoded_inputs,
raw_inputs, # only needed for input padding mask
flat_ids,
mutable=['cache'],
method=models.Transformer.decode)
new_flat_cache = new_vars['cache']
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
eos_id=eos_id,
max_decode_len=max_decode_len)
# Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension
# sorted in increasing order of log-probability.
# Return the highest scoring beam sequence, drop first dummy 0 token.
return beam_seqs[:, -1, 1:]
def translate_and_calculate_bleu(self,
params: spec.ParameterContainer,
ds_iter: Iterator,
num_batches: int,
max_predict_length: int) -> spec.Tensor:
"""Translates the `predict_ds` and calculates the BLEU score."""
logging.info('Translating evaluation dataset.')
references, predictions = [], []
for _ in range(num_batches):
pred_batch = next(ds_iter)
cache = self.initialize_cache(pred_batch['inputs'])
predicted = self.predict_step(pred_batch['inputs'],
params,
cache,
decode.EOS_ID,
max_predict_length)
predicted = _to_host(predicted)
targets = _to_host(pred_batch['targets'])
# Find actual batch size, ignoring the potential padding.
weights = pred_batch.get('weights')
if weights is not None:
weights = _to_host(weights)
actual_batch_size = int(weights.sum(0)[0].item())
else:
actual_batch_size = len(predicted)
# Iterate through non-padding examples of batch.
for idx in range(actual_batch_size):
references.append(self._decode_tokens(targets[idx]))
predictions.append(self._decode_tokens(predicted[idx]))
# Calculate BLEU score for translated eval corpus against reference.
bleu_score = bleu.corpus_bleu(predictions, [references]).score
return bleu_score
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""aux_dropout_rate is used as attention_dropout_rate."""
init_fake_batch_size = 2
input_shape = (init_fake_batch_size, 256)
target_shape = (init_fake_batch_size, 256)
model_config = models.TransformerConfig(
dropout_rate=dropout_rate, attention_dropout_rate=aux_dropout_rate)
self._train_model = models.Transformer(model_config)
self._eval_model = models.Transformer(
models.TransformerConfig(deterministic=True))
initial_variables = jax.jit(self._eval_model.init)(
rng,
jnp.ones(input_shape, jnp.float32),
jnp.ones(target_shape, jnp.float32))
initial_params = initial_variables['params']
self._param_shapes = param_utils.jax_param_shapes(initial_params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
return jax_utils.replicate(initial_params), None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'shared_embedding'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del update_batch_norm
inputs = augmented_and_preprocessed_input_batch.get('inputs', None)
targets = augmented_and_preprocessed_input_batch.get('targets', None)
inputs_positions = augmented_and_preprocessed_input_batch.get(
'inputs_position', None)
targets_positions = augmented_and_preprocessed_input_batch.get(
'targets_position', None)
inputs_segmentations = augmented_and_preprocessed_input_batch.get(
'inputs_segmentation', None)
targets_segmentations = augmented_and_preprocessed_input_batch.get(
'targets_segmentation', None)
if mode == spec.ForwardPassMode.TRAIN:
model = self._train_model
else:
model = self._eval_model
logits_batch = model.apply({'params': params},
inputs,
targets,
inputs_positions=inputs_positions,
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentations,
targets_segmentation=targets_segmentations,
rngs={'dropout': rng})
return logits_batch, None
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
del num_examples
eval_denominator = total_metrics.pop('denominator')
return jax.tree_map(lambda x: float(x / eval_denominator), total_metrics)
|
"""Fast decoding routines for inference from a trained model.
Forked from https://github.com/google/flax/tree/main/examples/wmt.
"""
import typing
import flax
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
# Constants
# We assume the default End-of-Sentence token id is 2 (SentencePiece).
EOS_ID = 2
# "Effective negative infinity" constant for masking in beam search.
NEG_INF = np.array(-1.0e7)
def brevity_penalty(alpha, length):
"""Brevity penalty function for beam search penalizing short sequences.
Args:
alpha: float: brevity-penalty scaling parameter.
length: int: length of considered sequence.
Returns:
Brevity penalty score as jax scalar.
"""
return jnp.power(((5.0 + length) / 6.0), alpha)
# Beam handling utility functions:
def add_beam_dim(x, beam_size):
"""Creates new beam dimension in non-scalar array and tiles into it."""
if x.ndim < 2: # ignore scalars (e.g. cache index)
return x
x = jnp.expand_dims(x, axis=1)
tile_dims = [1] * x.ndim
tile_dims[1] = beam_size
return jnp.tile(x, tile_dims)
def flatten_beam_dim(x):
"""Flattens the first two dimensions of a non-scalar array."""
if x.ndim < 2: # ignore scalars (e.g. cache index)
return x
return x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
def unflatten_beam_dim(x, batch_size, beam_size):
"""Unflattens the first, flat batch*beam dimension of a non-scalar array."""
if x.ndim < 2: # ignore scalars (e.g. cache index)
return x
assert batch_size * beam_size == x.shape[0]
return x.reshape((batch_size, beam_size) + x.shape[1:])
def flat_batch_beam_expand(x, beam_size):
"""Expands the each batch item by beam_size in batch_dimension."""
return flatten_beam_dim(add_beam_dim(x, beam_size))
def gather_beams(nested, beam_indices, batch_size, new_beam_size):
"""Gathers the beam slices indexed by beam_indices into new beam array.
Args:
nested: pytree of arrays or scalars (the latter ignored).
beam_indices: array of beam_indices
batch_size: int: size of batch.
new_beam_size: int: size of _new_ beam dimension.
Returns:
New pytree with new beam arrays.
[batch_size, old_beam_size, ...] --> [batch_size, new_beam_size, ...]
"""
batch_indices = jnp.reshape(
jnp.arange(batch_size * new_beam_size) // new_beam_size,
(batch_size, new_beam_size))
def gather_fn(x):
if x.ndim < 2: # ignore scalars (e.g. cache index)
return x
return x[batch_indices, beam_indices]
return jax.tree_map(gather_fn, nested)
def gather_topk_beams(nested, score_or_log_prob, batch_size, new_beam_size):
"""Gathers the top-k beam slices given by score_or_log_prob array.
Args:
nested: pytree of arrays or scalars (the latter ignored).
score_or_log_prob: [batch_size, old_beam_size] array of values to sort by
for top-k selection of beam slices.
batch_size: int: size of batch.
new_beam_size: int: size of _new_ top-k selected beam dimension
Returns:
New pytree with new beam arrays containing top k new_beam_size slices.
[batch_size, old_beam_size, ...] --> [batch_size, new_beam_size, ...]
"""
_, topk_indices = lax.top_k(score_or_log_prob, k=new_beam_size)
topk_indices = jnp.flip(topk_indices, axis=1)
return gather_beams(nested, topk_indices, batch_size, new_beam_size)
# Beam search state:
@flax.struct.dataclass
class BeamState:
"""Holds beam search state data."""
# The position of the decoding loop in the length dimension.
cur_index: jax.Array # scalar int32: current decoded length index
# The active sequence log probabilities and finished sequence scores.
live_logprobs: jax.Array # float32: [batch_size, beam_size]
finished_scores: jax.Array # float32: [batch_size, beam_size]
# The current active-beam-searching and finished sequences.
live_seqs: jax.Array # int32: [batch_size, beam_size, max_decode_len]
finished_seqs: jax.Array # int32: [batch_size, beam_size,
# max_decode_len]
# Records which of the 'finished_seqs' is occupied and not a filler slot.
finished_flags: jax.Array # bool: [batch_size, beam_size]
# The current state of the autoregressive decoding caches.
cache: typing.Any # Any pytree of arrays, e.g. flax attention Cache object
def beam_init(batch_size, beam_size, max_decode_len, cache):
"""Initializes the beam search state data structure."""
cur_index0 = jnp.array(0)
live_logprobs0 = jnp.tile(
jnp.array([0.0] + [NEG_INF] * (beam_size - 1)), [batch_size, 1])
finished_scores0 = jnp.ones((batch_size, beam_size)) * NEG_INF
live_seqs0 = jnp.zeros((batch_size, beam_size, max_decode_len), jnp.int32)
finished_seqs0 = jnp.zeros((batch_size, beam_size, max_decode_len), jnp.int32)
finished_flags0 = jnp.zeros((batch_size, beam_size), jnp.bool_)
# add beam dimension to attention cache pytree elements
beam_cache0 = jax.tree_map(lambda x: add_beam_dim(x, beam_size), cache)
return BeamState(
cur_index=cur_index0,
live_logprobs=live_logprobs0,
finished_scores=finished_scores0,
live_seqs=live_seqs0,
finished_seqs=finished_seqs0,
finished_flags=finished_flags0,
cache=beam_cache0)
# Beam search routine:
def beam_search(inputs,
cache,
tokens_to_logits,
beam_size=4,
alpha=0.6,
eos_id=EOS_ID,
max_decode_len=None):
"""Beam search for transformer machine translation.
Args:
inputs: array: [batch_size, length] int32 sequence of tokens.
cache: flax attention cache.
tokens_to_logits: fast autoregressive decoder function taking single token
slices and cache and returning next-token logits and updated cache.
beam_size: int: number of beams to use in beam search.
alpha: float: scaling factor for brevity penalty.
eos_id: int: id of end-of-sentence token for target vocabulary.
max_decode_len: int: maximum length of decoded translations.
Returns:
Tuple of:
[batch_size, beam_size, max_decode_len] top-scoring sequences
[batch_size, beam_size] beam-search scores.
"""
# We liberally annotate shape information for clarity below.
batch_size = inputs.shape[0]
if max_decode_len is None:
max_decode_len = inputs.shape[1]
end_marker = jnp.array(eos_id)
# initialize beam search state
beam_search_init_state = beam_init(batch_size,
beam_size,
max_decode_len,
cache)
def beam_search_loop_cond_fn(state):
"""Beam search loop termination condition."""
# Have we reached max decoding length?
not_at_end = state.cur_index < max_decode_len - 1
# Is no further progress in the beam search possible?
# Get the best possible scores from alive sequences.
min_brevity_penalty = brevity_penalty(alpha, max_decode_len)
best_live_scores = state.live_logprobs[:, -1:] / min_brevity_penalty
# Get the worst scores from finished sequences.
worst_finished_scores = jnp.min(
state.finished_scores, axis=1, keepdims=True)
# Mask out scores from slots without any actual finished sequences.
worst_finished_scores = jnp.where(state.finished_flags,
worst_finished_scores,
NEG_INF)
# If no best possible live score is better than current worst finished
# scores, the search cannot improve the finished set further.
search_terminated = jnp.all(worst_finished_scores > best_live_scores)
# If we're not at the max decode length, and the search hasn't terminated,
# continue looping.
return not_at_end & (~search_terminated)
def beam_search_loop_body_fn(state):
"""Beam search loop state update function."""
# Collect the current position slice along length to feed the fast
# autoregressive decoder model. Flatten the beam dimension into batch
# dimension for feeding into the model.
# --> [batch * beam, 1]
flat_ids = flatten_beam_dim(
lax.dynamic_slice(state.live_seqs, (0, 0, state.cur_index),
(batch_size, beam_size, 1)))
# Flatten beam dimension into batch to be compatible with model.
# {[batch, beam, ...], ...} --> {[batch * beam, ...], ...}
flat_cache = jax.tree_map(flatten_beam_dim, state.cache)
# Call fast-decoder model on current tokens to get next-position logits.
# --> [batch * beam, vocab]
flat_logits, new_flat_cache = tokens_to_logits(flat_ids, flat_cache)
# unflatten beam dimension
# [batch * beam, vocab] --> [batch, beam, vocab]
logits = unflatten_beam_dim(flat_logits, batch_size, beam_size)
# Unflatten beam dimension in attention cache arrays
# {[batch * beam, ...], ...} --> {[batch, beam, ...], ...}
new_cache = jax.tree_map(
lambda x: unflatten_beam_dim(x, batch_size, beam_size), new_flat_cache)
# Gather log probabilities from logits
candidate_log_probs = jax.nn.log_softmax(logits)
# Add new logprobs to existing prefix logprobs.
# --> [batch, beam, vocab]
log_probs = (
candidate_log_probs + jnp.expand_dims(state.live_logprobs, axis=2))
# We'll need the vocab size, gather it from the log probability dimension.
vocab_size = log_probs.shape[2]
# Each item in batch has beam_size * vocab_size candidate sequences.
# For each item, get the top 2*k candidates with the highest log-
# probabilities. We gather the top 2*K beams here so that even if the best
# K sequences reach EOS simultaneously, we have another K sequences
# remaining to continue the live beam search.
beams_to_keep = 2 * beam_size
# Flatten beam and vocab dimensions.
flat_log_probs = log_probs.reshape((batch_size, beam_size * vocab_size))
# Gather the top 2*K scores from _all_ beams.
# --> [batch, 2*beams], [batch, 2*beams]
topk_log_probs, topk_indices = lax.top_k(flat_log_probs, k=beams_to_keep)
# Recover the beam index by floor division.
topk_beam_indices = topk_indices // vocab_size
# Gather 2*k top beams.
# --> [batch, 2*beams, length]
topk_seq = gather_beams(state.live_seqs,
topk_beam_indices,
batch_size,
beams_to_keep)
# Append the most probable 2*K token IDs to the top 2*K sequences
    # Recover the token id by modulo division and expand the id array for
    # broadcasting.
# --> [batch, 2*beams, 1]
topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
# Update sequences for the 2*K top-k new sequences.
# --> [batch, 2*beams, length]
topk_seq = lax.dynamic_update_slice(topk_seq,
topk_ids, (0, 0, state.cur_index + 1))
# Update LIVE (in-progress) sequences:
# Did any of these sequences reach an end marker?
# --> [batch, 2*beams]
newly_finished = (topk_seq[:, :, state.cur_index + 1] == end_marker)
# To prevent these newly finished sequences from being added to the LIVE
# set of active beam search sequences, set their log probs to a very large
# negative value.
new_log_probs = topk_log_probs + newly_finished * NEG_INF
# Determine the top k beam indices (from top 2*k beams) from log probs.
# --> [batch, beams]
_, new_topk_indices = lax.top_k(new_log_probs, k=beam_size)
new_topk_indices = jnp.flip(new_topk_indices, axis=1)
# Gather the top k beams (from top 2*k beams).
# --> [batch, beams, length], [batch, beams]
top_alive_seq, top_alive_log_probs = gather_beams([topk_seq, new_log_probs],
new_topk_indices,
batch_size, beam_size)
# Determine the top k beam indices from the original set of all beams.
# --> [batch, beams]
top_alive_indices = gather_beams(topk_beam_indices,
new_topk_indices,
batch_size,
beam_size)
# With these, gather the top k beam-associated caches.
# --> {[batch, beams, ...], ...}
top_alive_cache = gather_beams(new_cache,
top_alive_indices,
batch_size,
beam_size)
# Update FINISHED (reached end of sentence) sequences:
# Calculate new seq scores from log probabilities.
new_scores = topk_log_probs / brevity_penalty(alpha, state.cur_index + 1)
# Mask out the still unfinished sequences by adding large negative value.
# --> [batch, 2*beams]
new_scores += (~newly_finished) * NEG_INF
# Combine sequences, scores, and flags along the beam dimension and compare
# new finished sequence scores to existing finished scores and select the
# best from the new set of beams.
finished_seqs = jnp.concatenate( # --> [batch, 3*beams, length]
[state.finished_seqs, topk_seq],
axis=1)
finished_scores = jnp.concatenate( # --> [batch, 3*beams]
[state.finished_scores, new_scores], axis=1)
finished_flags = jnp.concatenate( # --> [batch, 3*beams]
[state.finished_flags, newly_finished], axis=1)
# --> [batch, beams, length], [batch, beams], [batch, beams]
top_finished_seq, top_finished_scores, top_finished_flags = (
gather_topk_beams([finished_seqs, finished_scores, finished_flags],
finished_scores, batch_size, beam_size))
return BeamState(
cur_index=state.cur_index + 1,
live_logprobs=top_alive_log_probs,
finished_scores=top_finished_scores,
live_seqs=top_alive_seq,
finished_seqs=top_finished_seq,
finished_flags=top_finished_flags,
cache=top_alive_cache)
# Run while loop and get final beam search state.
final_state = lax.while_loop(beam_search_loop_cond_fn,
beam_search_loop_body_fn,
beam_search_init_state)
# Account for the edge-case where there are no finished sequences for a
# particular batch item. If so, return live sequences for that batch item.
# --> [batch]
none_finished = jnp.any(final_state.finished_flags, axis=1)
# --> [batch, beams, length]
finished_seqs = jnp.where(none_finished[:, None, None],
final_state.finished_seqs,
final_state.live_seqs)
# --> [batch, beams]
finished_scores = jnp.where(none_finished[:, None],
final_state.finished_scores,
final_state.live_logprobs)
return finished_seqs, finished_scores
|
"""ImageNet ViT workload."""
from typing import Dict, Iterator, Optional
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.workload import \
BaseImagenetResNetWorkload
def decode_variant(variant: str) -> Dict[str, int]:
"""Converts a string like 'B/32' into a params dict."""
v, patch = variant.split('/')
return {
# Reference: Table 2 of https://arxiv.org/abs/2106.04560.
'width': {
'Ti': 192,
'S': 384,
'M': 512,
'B': 768,
'L': 1024,
'H': 1280,
'g': 1408,
'G': 1664,
}[v],
'depth': {
'Ti': 12,
'S': 12,
'M': 12,
'B': 12,
'L': 24,
'H': 32,
'g': 40,
'G': 48,
}[v],
'mlp_dim': {
'Ti': 768,
'S': 1536,
'M': 2048,
'B': 3072,
'L': 4096,
'H': 5120,
'g': 6144,
'G': 8192,
}[v],
'num_heads': {
'Ti': 3, 'S': 6, 'M': 8, 'B': 12, 'L': 16, 'H': 16, 'g': 16, 'G': 16
}[v],
'patch_size': (int(patch), int(patch)),
}
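# Illustrative example (values follow the tables above): the ViT workloads in
# this repository instantiate the 'S/16' variant, i.e.
#   decode_variant('S/16') == {
#       'width': 384, 'depth': 12, 'mlp_dim': 1536, 'num_heads': 6,
#       'patch_size': (16, 16),
#   }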
class BaseImagenetVitWorkload(BaseImagenetResNetWorkload):
@property
def validation_target_value(self) -> float:
return 1 - 0.22691 # 0.77309
@property
def test_target_value(self) -> float:
return 1 - 0.3481 # 0.6519
@property
def eval_batch_size(self) -> int:
return 2048
@property
def max_allowed_runtime_sec(self) -> int:
return 77_520 # ~22 hours
@property
def eval_period_time_sec(self) -> int:
return 7 * 60 # 7 mins.
def _build_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
use_mixup: bool = False,
use_randaug: bool = False) -> Iterator[Dict[str, spec.Tensor]]:
    # ViT workloads always use mixup and RandAugment on the training split,
    # overriding whatever was passed in for `use_mixup` and `use_randaug`.
use_mixup = use_randaug = split == 'train'
return super()._build_dataset(data_rng,
split,
data_dir,
global_batch_size,
cache,
repeat_final_dataset,
use_mixup,
use_randaug)
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 186_666
|
"""PyTorch implementation of refactored and simplified ViT.
Adapted from:
https://github.com/huggingface/transformers/tree/main/src/transformers/models/vit.
https://github.com/lucidrains/vit-pytorch.
"""
import math
from typing import Any, Optional, Tuple, Union
import torch
from torch import nn
import torch.nn.functional as F
from algorithmic_efficiency import init_utils
from algorithmic_efficiency import spec
def posemb_sincos_2d(patches: spec.Tensor, temperature=10_000.) -> spec.Tensor:
_, width, h, w = patches.shape
device = patches.device
y, x = torch.meshgrid(torch.arange(h, device=device),
torch.arange(w, device=device), indexing='ij')
if width % 4 != 0:
raise ValueError('Width must be mult of 4 for sincos posemb.')
omega = torch.arange(width // 4, device=device) / (width // 4 - 1)
omega = 1. / (temperature**omega)
y = y.flatten()[:, None] * omega[None, :]
x = x.flatten()[:, None] * omega[None, :]
pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1)
return pe[None, :, :]
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block."""
def __init__(
self,
width: int,
mlp_dim: Optional[int] = None, # Defaults to 4x input dim.
dropout_rate: float = 0.0) -> None:
super().__init__()
self.width = width
self.mlp_dim = mlp_dim or 4 * width
self.dropout_rate = dropout_rate
self.net = nn.Sequential(
nn.Linear(self.width, self.mlp_dim),
nn.GELU(),
nn.Dropout(self.dropout_rate),
nn.Linear(self.mlp_dim, self.width))
self.reset_parameters()
def reset_parameters(self) -> None:
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight.data)
if module.bias is not None:
module.bias.data.normal_(std=1e-6)
def forward(self, x: spec.Tensor) -> spec.Tensor:
return self.net(x)
class SelfAttention(nn.Module):
"""Self-attention special case of multi-head dot-product attention."""
def __init__(self,
width: int,
num_heads: int = 8,
dropout_rate: float = 0.0) -> None:
super().__init__()
self.width = width
self.num_heads = num_heads
assert width % num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
self.head_dim = int(width / num_heads)
self.all_head_dim = self.num_heads * self.head_dim
self.query = nn.Linear(self.width, self.all_head_dim)
self.key = nn.Linear(self.width, self.all_head_dim)
self.value = nn.Linear(self.width, self.all_head_dim)
self.dropout = nn.Dropout(dropout_rate)
self.out = nn.Linear(self.width, self.width)
self.reset_parameters()
def reset_parameters(self) -> None:
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight.data)
if module.bias is not None:
nn.init.constant_(module.bias.data, 0.)
def transpose_for_scores(self, x: spec.Tensor) -> spec.Tensor:
new_x_shape = x.size()[:-1] + (self.num_heads, self.head_dim)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, x: spec.Tensor) -> spec.Tensor:
mixed_query_layer = self.query(x)
key_layer = self.transpose_for_scores(self.key(x))
value_layer = self.transpose_for_scores(self.value(x))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.head_dim)
attention_probs = F.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_dim,)
context_layer = context_layer.view(new_context_layer_shape)
out = self.out(context_layer)
return out
class Encoder1DBlock(nn.Module):
"""Single transformer encoder block (MHSA + MLP)."""
def __init__(self,
width: int,
mlp_dim: Optional[int] = None,
num_heads: int = 12,
dropout_rate: float = 0.0) -> None:
super().__init__()
self.width = width
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.layer_norm0 = nn.LayerNorm(self.width, eps=1e-6)
self.self_attention1 = SelfAttention(self.width, self.num_heads)
self.dropout = nn.Dropout(dropout_rate)
self.layer_norm2 = nn.LayerNorm(self.width, eps=1e-6)
self.mlp3 = MlpBlock(self.width, self.mlp_dim, dropout_rate)
def forward(self, x: spec.Tensor) -> spec.Tensor:
y = self.layer_norm0(x)
y = self.self_attention1(y)
y = self.dropout(y)
x = x + y
y = self.layer_norm2(x)
y = self.mlp3(y)
y = self.dropout(y)
x = x + y
return x
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation."""
def __init__(self,
depth: int,
width: int,
mlp_dim: Optional[int] = None,
num_heads: int = 12,
dropout_rate: float = 0.0) -> None:
super().__init__()
self.depth = depth
self.width = width
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.net = nn.ModuleList([
Encoder1DBlock(self.width, self.mlp_dim, self.num_heads, dropout_rate)
for _ in range(depth)
])
self.encoder_norm = nn.LayerNorm(self.width, eps=1e-6)
def forward(self, x: spec.Tensor) -> spec.Tensor:
# Input Encoder.
for block in self.net:
x = block(x)
return self.encoder_norm(x)
class ViT(nn.Module):
"""ViT model."""
image_height: int = 224
image_width: int = 224
channels: int = 3
def __init__(
self,
num_classes: int = 1000,
patch_size: Tuple[int, int] = (16, 16),
width: int = 768,
depth: int = 12,
mlp_dim: Optional[int] = None, # Defaults to 4x input dim.
num_heads: int = 12,
rep_size: Union[int, bool] = True,
dropout_rate: Optional[float] = 0.0,
head_zeroinit: bool = True,
dtype: Any = torch.float32) -> None:
super().__init__()
if dropout_rate is None:
dropout_rate = 0.0
self.num_classes = num_classes
self.patch_size = patch_size
self.width = width
self.depth = depth
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.rep_size = rep_size
self.head_zeroinit = head_zeroinit
self.dtype = dtype
if self.rep_size:
rep_size = self.width if self.rep_size is True else self.rep_size
self.pre_logits = nn.Linear(self.width, rep_size)
self.conv_patch_extract = nn.Conv2d(
self.channels,
self.width,
self.patch_size,
stride=self.patch_size,
padding='valid')
self.dropout = nn.Dropout(p=dropout_rate)
self.encoder = Encoder(
depth=self.depth,
width=self.width,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout_rate=dropout_rate)
if self.num_classes:
self.head = nn.Linear(self.width, self.num_classes)
self.reset_parameters()
def reset_parameters(self) -> None:
init_utils.pytorch_default_init(self.conv_patch_extract)
if self.rep_size:
init_utils.pytorch_default_init(self.pre_logits)
if self.num_classes:
if self.head_zeroinit:
nn.init.constant_(self.head.weight.data, 0.)
nn.init.constant_(self.head.bias.data, 0.)
else:
init_utils.pytorch_default_init(self.head)
def get_posemb(self, x: spec.Tensor) -> spec.Tensor:
return posemb_sincos_2d(x).type(self.dtype)
def forward(self, x: spec.Tensor) -> spec.Tensor:
# Patch extraction.
x = self.conv_patch_extract(x)
# Add posemb before adding extra token.
n, c, h, w = x.shape
pes = self.get_posemb(x)
# Reshape to match Jax's ViT implementation.
x = torch.transpose(torch.reshape(x, (n, c, h * w)), 1, 2)
x = x + pes
x = self.dropout(x)
x = self.encoder(x)
x = torch.mean(x, dim=1)
if self.rep_size:
x = torch.tanh(self.pre_logits(x))
if self.num_classes:
x = self.head(x)
return x
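# Illustrative smoke test: build a small ViT and run a forward pass on a
# random batch. The sizes below are assumptions chosen only to keep the
# example cheap; `width` must be divisible by `num_heads` (and by 4 for the
# sincos position embedding helper used above).
if __name__ == '__main__':
  vit = ViT(num_classes=10, width=384, depth=2, num_heads=6)
  images = torch.randn(2, 3, 224, 224)
  logits = vit(images)
  print(logits.shape)  # Expected: torch.Size([2, 10])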
|
"""ImageNet ViT workload implemented in PyTorch."""
import contextlib
from typing import Dict, Optional, Tuple
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import \
ImagenetResNetWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch import \
models
from algorithmic_efficiency.workloads.imagenet_vit.workload import \
BaseImagenetVitWorkload
from algorithmic_efficiency.workloads.imagenet_vit.workload import \
decode_variant
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
# Make sure we inherit from the ViT base workload first.
class ImagenetVitWorkload(BaseImagenetVitWorkload, ImagenetResNetWorkload):
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
del aux_dropout_rate
torch.random.manual_seed(rng[0])
model = models.ViT(
dropout_rate=dropout_rate,
num_classes=self._num_classes,
**decode_variant('S/16'))
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['head.weight', 'head.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
del update_batch_norm
model = params
if mode == spec.ForwardPassMode.EVAL:
model.eval()
if mode == spec.ForwardPassMode.TRAIN:
model.train()
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits_batch = model(augmented_and_preprocessed_input_batch['inputs'])
return logits_batch, None
|
"""Jax implementation of refactored and simplified ViT.
Forked from:
https://github.com/google/init2winit/blob/master/init2winit/model_lib/vit.py,
originally from https://github.com/google/big_vision with modifications noted.
"""
from typing import Optional, Sequence, Union
from flax import linen as nn
import jax.numpy as jnp
from algorithmic_efficiency import spec
def posemb_sincos_2d(h: int,
w: int,
width: int,
temperature: int = 10_000.,
dtype: jnp.dtype = jnp.float32) -> spec.Tensor:
"""Follows the MoCo v3 logic."""
y, x = jnp.mgrid[:h, :w] #pylint: disable=unpacking-non-sequence
if width % 4 != 0:
raise ValueError('Width must be mult of 4 for sincos posemb.')
omega = jnp.arange(width // 4) / (width // 4 - 1)
omega = 1. / (temperature**omega)
y = jnp.einsum('m,d->md', y.flatten(), omega)
x = jnp.einsum('m,d->md', x.flatten(), omega)
pe = jnp.concatenate([jnp.sin(x), jnp.cos(x), jnp.sin(y), jnp.cos(y)], axis=1)
return jnp.asarray(pe, dtype)[None, :, :]
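# Worked shape example (illustrative):
#   >>> posemb_sincos_2d(14, 14, 768).shape
#   (1, 196, 768)
# Four (196, 192) blocks of sin/cos features are concatenated along the last
# axis and a leading axis is added so the table broadcasts over the batch.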
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block."""
mlp_dim: Optional[int] = None # Defaults to 4x input dim.
dropout_rate: float = 0.0
@nn.compact
def __call__(self, x: spec.Tensor, train: bool = True) -> spec.Tensor:
"""Applies Transformer MlpBlock module."""
inits = {
'kernel_init': nn.initializers.xavier_uniform(),
'bias_init': nn.initializers.normal(stddev=1e-6),
}
d = x.shape[2]
x = nn.Dense(self.mlp_dim or 4 * d, **inits)(x)
x = nn.gelu(x)
x = nn.Dropout(rate=self.dropout_rate)(x, train)
x = nn.Dense(d, **inits)(x)
return x
class Encoder1DBlock(nn.Module):
"""Single transformer encoder block (MHSA + MLP)."""
mlp_dim: Optional[int] = None # Defaults to 4x input dim.
num_heads: int = 12
dropout_rate: float = 0.0
@nn.compact
def __call__(self, x: spec.Tensor, train: bool = True) -> spec.Tensor:
y = nn.LayerNorm(name='LayerNorm_0')(x)
y = nn.SelfAttention(
num_heads=self.num_heads,
kernel_init=nn.initializers.xavier_uniform(),
deterministic=train,
name='MultiHeadDotProductAttention_1')(
y)
y = nn.Dropout(rate=self.dropout_rate)(y, train)
x = x + y
y = nn.LayerNorm(name='LayerNorm_2')(x)
y = MlpBlock(
mlp_dim=self.mlp_dim, dropout_rate=self.dropout_rate,
name='MlpBlock_3')(y, train)
y = nn.Dropout(rate=self.dropout_rate)(y, train)
x = x + y
return x
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation."""
depth: int
mlp_dim: Optional[int] = None # Defaults to 4x input dim.
num_heads: int = 12
dropout_rate: float = 0.0
@nn.compact
def __call__(self, x: spec.Tensor, train: bool = True) -> spec.Tensor:
# Input Encoder
for lyr in range(self.depth):
block = Encoder1DBlock(
name=f'encoderblock_{lyr}',
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate)
x = block(x, train)
return nn.LayerNorm(name='encoder_layernorm')(x)
class ViT(nn.Module):
"""ViT model."""
num_classes: int = 1000
patch_size: Sequence[int] = (16, 16)
width: int = 768
depth: int = 12
mlp_dim: Optional[int] = None # Defaults to 4x input dim.
num_heads: int = 12
rep_size: Union[int, bool] = True
dropout_rate: Optional[float] = 0.0 # If None, defaults to 0.0.
reinit: Optional[Sequence[str]] = None
head_zeroinit: bool = True
def get_posemb(self,
seqshape: tuple,
width: int,
dtype: jnp.dtype = jnp.float32) -> spec.Tensor:
return posemb_sincos_2d(*seqshape, width, dtype=dtype)
@nn.compact
def __call__(self, x: spec.Tensor, *, train: bool = False) -> spec.Tensor:
# Patch extraction
x = nn.Conv(
self.width,
self.patch_size,
strides=self.patch_size,
padding='VALID',
name='conv_patch_extract')(
x)
n, h, w, c = x.shape
x = jnp.reshape(x, [n, h * w, c])
# Add posemb before adding extra token.
x = x + self.get_posemb((h, w), c, x.dtype)
dropout_rate = self.dropout_rate
if dropout_rate is None:
dropout_rate = 0.0
x = nn.Dropout(rate=dropout_rate)(x, not train)
x = Encoder(
depth=self.depth,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout_rate=dropout_rate,
name='Transformer')(
x, train=not train)
x = jnp.mean(x, axis=1)
if self.rep_size:
rep_size = self.width if self.rep_size is True else self.rep_size
hid = nn.Dense(rep_size, name='pre_logits')
x = nn.tanh(hid(x))
if self.num_classes:
kw = {'kernel_init': nn.initializers.zeros} if self.head_zeroinit else {}
head = nn.Dense(self.num_classes, name='head', **kw)
x = head(x)
return x
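# Illustrative initialization sketch: the sizes below are assumptions for a
# cheap demo; `width` must be a multiple of 4 for the sincos position
# embedding and divisible by `num_heads`.
if __name__ == '__main__':
  import jax
  model = ViT(num_classes=10, width=384, depth=2, num_heads=6)
  dummy = jnp.ones((1, 224, 224, 3))
  variables = model.init(jax.random.PRNGKey(0), dummy, train=False)
  logits = model.apply(variables, dummy, train=False)
  print(logits.shape)  # Expected: (1, 10)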
|
"""ImageNet workload implemented in Jax."""
from typing import Dict, Optional, Tuple
from flax import jax_utils
from flax import linen as nn
import jax
import jax.numpy as jnp
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
ImagenetResNetWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax import models
from algorithmic_efficiency.workloads.imagenet_vit.workload import \
BaseImagenetVitWorkload
from algorithmic_efficiency.workloads.imagenet_vit.workload import \
decode_variant
# Make sure we inherit from the ViT base workload first.
class ImagenetVitWorkload(BaseImagenetVitWorkload, ImagenetResNetWorkload):
def initialized(self, key: spec.RandomState,
model: nn.Module) -> spec.ModelInitState:
input_shape = (1, 224, 224, 3)
variables = jax.jit(model.init)({'params': key}, jnp.ones(input_shape))
model_state, params = variables.pop('params')
return params, model_state
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
del aux_dropout_rate
self._model = models.ViT(
dropout_rate=dropout_rate,
num_classes=self._num_classes,
**decode_variant('S/16'))
params, model_state = self.initialized(rng, self._model)
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
model_state = jax_utils.replicate(model_state)
params = jax_utils.replicate(params)
return params, model_state
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'head'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del update_batch_norm
train = mode == spec.ForwardPassMode.TRAIN
logits = self._model.apply({'params': params},
augmented_and_preprocessed_input_batch['inputs'],
rngs={'dropout': rng},
train=train)
return logits, None
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
model_state = None
return super()._eval_model_on_split(split,
num_examples,
global_batch_size,
params,
model_state,
rng,
data_dir,
global_step)
|
from clu import metrics
import flax
import numpy as np
import tensorflow as tf
import tensorflow_text as tftxt
gfile = tf.io.gfile
def average_ctc_loss():
"""Returns a clu.Metric that computes average CTC loss
taking padding into account.
"""
@flax.struct.dataclass
class _Metric(metrics.Metric):
"""Applies `fun` and computes the average."""
total: np.float32
weight: np.float32
@classmethod
def from_model_output(cls, loss_dict, **_):
return cls(
total=loss_dict['summed'], weight=loss_dict['n_valid_examples'])
def merge(self, other):
return type(self)(
total=self.total + other.total, weight=self.weight + other.weight)
def compute(self):
return self.total / self.weight
return _Metric
def edit_distance(source, target):
"""Computes edit distance between source string and target string.
This function assumes words are separated by a single space.
Args:
source: source string.
target: target string.
Returns:
Edit distance between source string and target string.
"""
source = source.split()
target = target.split()
num_source_words = len(source)
num_target_words = len(target)
distance = np.zeros((num_source_words + 1, num_target_words + 1))
for i in range(num_source_words + 1):
for j in range(num_target_words + 1):
# If first string is empty, only option is to
# insert all words of second string.
if i == 0:
distance[i][j] = j # Min. operations = j
# If second string is empty, only option is to
# remove all words of the first string.
elif j == 0:
distance[i][j] = i # Min. operations = i
# If last words are the same, ignore the last word
# and recur for the remaining strings.
elif source[i - 1] == target[j - 1]:
distance[i][j] = distance[i - 1][j - 1]
# If the last words are different, consider all
# possibilities and find the minimum.
else:
distance[i][j] = 1 + min(
distance[i][j - 1], # Insert
distance[i - 1][j], # Remove
distance[i - 1][j - 1]) # Replace
return distance[num_source_words][num_target_words]
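# Worked example (illustrative): the distance is computed over words, not
# characters, and comes back as a float from the numpy table, e.g.
#   edit_distance('the cat sat', 'the cat sit') -> 1.0  (one substitution)
#   edit_distance('hello world', 'hello')       -> 1.0  (one deletion)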
def compute_wer(decoded, decoded_paddings, targets, target_paddings, tokenizer):
word_errors = 0.0
num_words = 0.0
decoded_lengths = np.sum(decoded_paddings == 0.0, axis=-1)
target_lengths = np.sum(target_paddings == 0.0, axis=-1)
batch_size = targets.shape[0]
for i in range(batch_size):
decoded_length = decoded_lengths[i]
target_length = target_lengths[i]
decoded_i = decoded[i][:decoded_length]
target_i = targets[i][:target_length]
decoded_i = str(tokenizer.detokenize(decoded_i.astype(np.int32)))
target_i = str(tokenizer.detokenize(target_i.astype(np.int32)))
target_i = ' '.join(target_i.split())
target_num_words = len(target_i.split(' '))
word_errors += edit_distance(decoded_i, target_i)
num_words += target_num_words
return word_errors, num_words
def load_tokenizer(model_path: str,
add_bos: bool = False,
add_eos: bool = True,
reverse: bool = False):
"""Load a tf-text SentencePiece tokenizer from given model filepath."""
if model_path is None:
return None
with gfile.GFile(model_path, 'rb') as model_fp:
sp_model = model_fp.read()
sp_tokenizer = tftxt.SentencepieceTokenizer(
model=sp_model, add_bos=add_bos, add_eos=add_eos, reverse=reverse)
return sp_tokenizer
def wer(tokenizer_vocab_path):
tokenizer = load_tokenizer(tokenizer_vocab_path)
@flax.struct.dataclass
class WER(
metrics.CollectingMetric.from_outputs(
('decoded', 'decoded_paddings', 'targets', 'target_paddings'))):
"""Computes the mean average precision for a binary classifier on CPU."""
def compute(self):
if tokenizer is None:
return 0
values = super().compute()
# Ensure the arrays are numpy and not jax.numpy.
values = {k: np.array(v) for k, v in values.items()}
word_errors, num_words = compute_wer(
values['decoded'],
values['decoded_paddings'],
values['targets'].astype(np.int32),
values['target_paddings'],
tokenizer)
return word_errors / num_words
return WER
def get_metrics_bundle(tokenizer_vocab_path):
return metrics.Collection.create(
ctc_loss=average_ctc_loss(), wer=wer(tokenizer_vocab_path))
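# Illustrative sketch of the CTC-loss averaging metric on fake loss
# dictionaries: merging {summed: 10, n_valid_examples: 2} with
# {summed: 6, n_valid_examples: 2} yields (10 + 6) / (2 + 2) = 4.
if __name__ == '__main__':
  ctc_metric_cls = average_ctc_loss()
  m1 = ctc_metric_cls.from_model_output(
      {'summed': np.float32(10.), 'n_valid_examples': np.float32(2.)})
  m2 = ctc_metric_cls.from_model_output(
      {'summed': np.float32(6.), 'n_valid_examples': np.float32(2.)})
  print(m1.merge(m2).compute())  # Expected: 4.0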
|
"""Sharing the jax input pipeline slows down the data loading
and step times.
"""
import csv
from absl import logging
import numpy as np
import torch
class LibriSpeechDataset(torch.utils.data.Dataset):
def __init__(self, split, data_dir):
super().__init__()
self.data_dir = data_dir
splits = split.split('+')
ids = []
for split in splits:
logging.info('Loading split = %s', split)
feat_csv = '{}/{}.csv'.format(data_dir, split)
with open(feat_csv, newline='') as csvfile:
data = list(csv.reader(csvfile))
for example in data[1:]:
ids.append('{}/{}'.format(split, example[1]))
self.ids = ids
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
example_id = self.ids[index]
data_dir = self.data_dir
audio = np.load('{}/{}_audio.npy'.format(data_dir, example_id))
targets = np.load('{}/{}_targets.npy'.format(data_dir, example_id))
audio_paddings = np.zeros_like(audio, dtype=np.float32)
audio_paddings = np.pad(
audio_paddings, (0, 320000 - audio.shape[0]), constant_values=1.0)
audio = np.pad(audio, (0, 320000 - audio.shape[0]), constant_values=0.0)
target_paddings = np.zeros_like(targets, dtype=np.float32)
target_paddings = np.pad(
target_paddings, (0, 256 - target_paddings.shape[0]),
constant_values=1.0)
targets = np.pad(targets, (0, 256 - targets.shape[0]), constant_values=0)
audio = audio.astype(np.float32)
audio_paddings = audio_paddings.astype(np.float32)
targets = targets.astype(np.float32)
target_paddings = target_paddings.astype(np.float32)
return (audio, audio_paddings), (targets, target_paddings)
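# Usage sketch (illustrative): the split name and the CSV/.npy layout under
# `data_dir` are assumed to follow the preprocessing used elsewhere in this
# repository; every example is padded to 320000 audio samples and 256 targets.
if __name__ == '__main__':
  ds = LibriSpeechDataset(split='dev-clean', data_dir='/path/to/librispeech')
  (audio, audio_paddings), (targets, target_paddings) = ds[0]
  print(audio.shape, targets.shape)  # Expected: (320000,) (256,)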
|
import math
from typing import Dict
from algorithmic_efficiency import spec
class BaseLibrispeechWorkload(spec.Workload):
_num_outputs: int = 1024
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'wer'
def has_reached_validation_target(self, eval_result: Dict[str,
float]) -> bool:
return eval_result['validation/wer'] < self.validation_target_value
@property
def validation_target_value(self) -> float:
return 0.078477
def has_reached_test_target(self, eval_result: Dict[str, float]) -> bool:
return eval_result['test/wer'] < self.test_target_value
@property
def test_target_value(self) -> float:
return 0.046973
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.CTC_LOSS
@property
def num_train_examples(self) -> int:
return 263840
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
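    # Worked example (illustrative): with num_validation_examples = 5348 and
    # eval_batch_size = 256, ceil(5348 / 256) = 21, so this property returns
    # 21 * 256 = 5376 examples.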
@property
def num_validation_examples(self) -> int:
return 5348
@property
def num_test_examples(self) -> int:
return 2472
@property
def eval_batch_size(self) -> int:
return 256
@property
def train_mean(self):
raise NotImplementedError
@property
def train_stddev(self):
raise NotImplementedError
@property
def max_allowed_runtime_sec(self) -> int:
return 101_780 # ~28 hours
@property
def eval_period_time_sec(self) -> int:
return 40 * 60 # 40m
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 133_333
|
"""A flax layer to do data augmentation for audio signals as
described in https://arxiv.org/abs/1904.08779.
Code based on:
github.com/tensorflow/lingvo/blob/master/lingvo/jax/layers/spectrum_augmenter.py
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class SpecAug(nn.Module):
"""Layer performs masking prodecure along time and frequency axis.
The procedure is detailed in https://arxiv.org/abs/1904.08779.
This is an essential component in speech recognition models that helps achieve
better word error rates.
"""
freq_mask_count: int = 2
freq_mask_max_bins: int = 27
time_mask_count: int = 10
time_mask_max_frames: int = 40
time_mask_max_ratio: float = 0.05
time_masks_per_frame: float = 0.0
use_dynamic_time_mask_max_frames: bool = True
def next_prng_key(self, name='dropout'):
return self.make_rng(name)
def _get_mask(self,
batch_size,
choose_range,
mask_size,
max_length=None,
masks_per_frame=0.0,
multiplicity=1,
max_ratio=1.0):
# Sample lengths for multiple masks.
if max_length and max_length > 0:
max_length = jnp.tile(max_length, (batch_size,))
else:
max_length = choose_range * max_ratio
masked_portion = jax.random.uniform(
key=self.next_prng_key(),
shape=(batch_size, multiplicity),
minval=0.0,
maxval=1.0)
masked_frame_size = jnp.einsum('b,bm->bm', max_length,
masked_portion).astype(jnp.int32)
# Make sure the sampled length is smaller than max_ratio * length_bound.
# Note that sampling in this way is biased
# (shorter sequences may be over-masked).
choose_range = jnp.tile(choose_range[:, None], [1, multiplicity])
length_bound = (max_ratio * choose_range).astype(jnp.int32)
length = jnp.minimum(masked_frame_size, jnp.maximum(length_bound, 1))
# Choose starting point.
random_start = jax.random.uniform(
key=self.next_prng_key(), shape=(batch_size, multiplicity), maxval=1.0)
start_with_in_valid_range = random_start * (choose_range - length + 1)
start = start_with_in_valid_range.astype(jnp.int32)
end = start + length - 1
# Shift starting and end point by small value.
delta = 0.1
start = jnp.expand_dims(start - delta, -1)
start = jnp.tile(start, [1, 1, mask_size])
end = jnp.expand_dims(end + delta, -1)
end = jnp.tile(end, [1, 1, mask_size])
# Construct pre-mask of shape (batch_size, multiplicity, mask_size).
diagonal = jnp.expand_dims(jnp.expand_dims(jnp.arange(mask_size), 0), 0)
diagonal = jnp.tile(diagonal, [batch_size, multiplicity, 1])
pre_mask = jnp.minimum(diagonal < end, diagonal > start)
# Sum masks with appropriate multiplicity.
if masks_per_frame > 0:
multiplicity_weights = jnp.tile(
jnp.expand_dims(jnp.arange(multiplicity, dtype=jnp.int32), 0),
[batch_size, 1])
multiplicity_tensor = masks_per_frame * choose_range
multiplicity_weights = (multiplicity_weights <
multiplicity_tensor).astype(jnp.int32)
pre_mask = jnp.einsum('bmt,bm->bt', pre_mask, multiplicity_weights)
else:
pre_mask = jnp.einsum('bmt->bt', pre_mask)
mask = 1.0 - (pre_mask > 0).astype(jnp.int32)
return mask
def _time_mask(self, inputs, length):
# Get time masking parameters.
time_mask_max_frames = self.time_mask_max_frames
use_dynamic_time_mask_max_frames = self.use_dynamic_time_mask_max_frames
multiplicity = self.time_mask_count
max_ratio = self.time_mask_max_ratio
# If maximum mask length is zero, do nothing.
if ((time_mask_max_frames == 0 and not use_dynamic_time_mask_max_frames) or
max_ratio <= 0.0):
return inputs
if multiplicity == 0:
return inputs
batch_size, time_length, _ = inputs.shape
# When using dynamic time mask size, discard upper-bound on
# maximum allowed frames for time mask.
if use_dynamic_time_mask_max_frames:
time_mask_max_frames = None
# Create masks in time direction and apply.
block_arrays = self._get_mask(
batch_size,
choose_range=length,
mask_size=time_length,
max_length=time_mask_max_frames,
masks_per_frame=self.time_masks_per_frame,
multiplicity=multiplicity,
max_ratio=max_ratio)
outputs = jnp.einsum('bxy,bx->bxy', inputs, block_arrays)
return outputs
def _frequency_mask(self, inputs):
# Mask parameters.
freq_mask_max_bins = self.freq_mask_max_bins
multiplicity = self.freq_mask_count
# If masking length or count is zero, do nothing.
if freq_mask_max_bins == 0 or multiplicity == 0:
return inputs
# Arguments to pass to mask generator.
batch_size, _, num_freq = inputs.shape
choose_range = jnp.tile(num_freq, (batch_size,))
# Create masks in frequency direction and apply.
block_arrays = self._get_mask(
batch_size,
choose_range=choose_range,
mask_size=num_freq,
max_length=freq_mask_max_bins,
masks_per_frame=0.0,
multiplicity=multiplicity,
max_ratio=1.0)
outputs = jnp.einsum('bxy,by->bxy', inputs, block_arrays)
return outputs
@nn.compact
def __call__(self, inputs, paddings):
lengths = jnp.einsum('bh->b', 1 - paddings).astype(jnp.int32)
inputs = self._time_mask(inputs, lengths)
inputs = self._frequency_mask(inputs)
return inputs, paddings
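# Illustrative sketch: apply SpecAug to a fake batch of log-mel features. The
# module has no parameters; its masking randomness comes from the 'dropout'
# PRNG stream, which therefore has to be passed to `apply`. Shapes below are
# assumptions for the demo only.
if __name__ == '__main__':
  specaug = SpecAug()
  features = jnp.ones((2, 100, 80))  # (batch, time, freq)
  paddings = jnp.zeros((2, 100))     # 0 = valid frame, 1 = padded frame
  masked, _ = specaug.apply(
      {}, features, paddings, rngs={'dropout': jax.random.PRNGKey(0)})
  print(masked.shape)  # Expected: (2, 100, 80)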
|
r"""Conformer.
This model uses a conformer network to convert speech to text.
Paper: https://arxiv.org/abs/2005.08100
High-level overview of a Conformer encoder layer:
x = x + 0.5 * FeedForward(x)
x = x + MHSA(x)
x = x + ConvolutionBlock(x)
x = x + 0.5 * FeedForward(x)
y = layer_norm(x)
"""
import math
from typing import Any, List, Optional
from flax import linen as nn
from flax import struct
import jax
import jax.numpy as jnp
import numpy as np
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
librispeech_preprocessor as preprocessor
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
spectrum_augmenter
@struct.dataclass
class ConformerConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int = 1024
dtype: Any = jnp.float32
encoder_dim: int = 512
num_attention_heads: int = 8
num_encoder_layers: int = 4
attention_dropout_rate: float = 0.0
# If None, defaults to 0.1.
attention_residual_dropout_rate: Optional[float] = 0.1
# If None, defaults to 0.0.
conv_residual_dropout_rate: Optional[float] = 0.0
feed_forward_dropout_rate: float = 0.0
# If None, defaults to 0.1.
feed_forward_residual_dropout_rate: Optional[float] = 0.1
convolution_kernel_size: int = 5
feed_forward_expansion_factor: int = 4
freq_mask_count: int = 2
freq_mask_max_bins: int = 27
time_mask_count: int = 10
time_mask_max_frames: int = 40
time_mask_max_ratio: float = 0.05
time_masks_per_frame: float = 0.0
use_dynamic_time_mask_max_frames: bool = True
# If None, defaults to 0.1.
input_dropout_rate: Optional[float] = 0.1
batch_norm_momentum: float = 0.999
batch_norm_epsilon: float = 0.001
use_specaug: bool = True
class LayerNorm(nn.Module):
"""Module implementing layer normalization.
This implementation is the same as in this paper:
https://arxiv.org/pdf/1607.06450.pdf.
Note: we multiply normalized inputs by (1 + scale) and initialize scale to
zeros; this differs from the default Flax implementation, which multiplies by
scale and initializes it to ones.
"""
dim: int = 0
epsilon: float = 1e-6
def setup(self):
self.scale = self.param('scale', nn.initializers.zeros, [self.dim])
self.bias = self.param('bias', nn.initializers.zeros, [self.dim])
@nn.compact
def __call__(self, inputs):
mean = jnp.mean(inputs, axis=[-1], keepdims=True)
var = jnp.mean(jnp.square(inputs - mean), axis=[-1], keepdims=True)
normed_inputs = (inputs - mean) * jax.lax.rsqrt(var + self.epsilon)
normed_inputs *= (1 + self.scale)
normed_inputs += self.bias
return normed_inputs
class Subsample(nn.Module):
"""Module to perform strided convolution in order to subsample inputs.
Attributes:
encoder_dim: model dimension of conformer.
input_dropout_rate: dropout rate for inputs.
"""
encoder_dim: int = 0
input_dropout_rate: float = 0.0
@nn.compact
def __call__(self, inputs, input_paddings, train):
output_paddings = input_paddings
outputs = jnp.expand_dims(inputs, axis=-1)
outputs, output_paddings = Conv2dSubsampling(
input_channels=1, output_channels=self.encoder_dim)(
outputs, output_paddings)
outputs, output_paddings = Conv2dSubsampling(
input_channels=self.encoder_dim,
output_channels=self.encoder_dim)(outputs, output_paddings)
batch_size, subsampled_lengths, subsampled_dims, channels = outputs.shape
outputs = jnp.reshape(
outputs, (batch_size, subsampled_lengths, subsampled_dims * channels))
outputs = nn.Dense(
self.encoder_dim,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
outputs)
outputs = outputs + AddPositionalEmbedding(embedding_dim=self.encoder_dim)(
seq_length=outputs.shape[1])
outputs = nn.Dropout(
rate=self.input_dropout_rate, deterministic=not train)(
outputs)
return outputs, output_paddings
class Conv2dSubsampling(nn.Module):
"""Helper module used in Subsample layer.
1) Performs strided convolution over inputs and then applies non-linearity.
2) Also performs strided convolution over input_paddings to return the correct
paddings for downstream layers.
"""
input_channels: int = 0
output_channels: int = 0
filter_stride: List[int] = (2, 2)
padding: str = 'SAME'
def setup(self):
self.filter_shape = (3, 3, self.input_channels, self.output_channels)
self.kernel = self.param('kernel',
nn.initializers.xavier_uniform(),
self.filter_shape)
self.bias = self.param(
'bias', lambda rng, s: jnp.zeros(s, jnp.float32), self.output_channels)
@nn.compact
def __call__(self, inputs, paddings):
# Computing strided convolution to subsample inputs.
feature_group_count = inputs.shape[3] // self.filter_shape[2]
outputs = jax.lax.conv_general_dilated(
lhs=inputs,
rhs=self.kernel,
window_strides=self.filter_stride,
padding=self.padding,
rhs_dilation=(1, 1),
dimension_numbers=('NHWC', 'HWIO', 'NHWC'),
feature_group_count=feature_group_count)
outputs += jnp.reshape(self.bias, (1,) * (outputs.ndim - 1) + (-1,))
outputs = nn.relu(outputs)
# Computing correct paddings post input convolution.
input_length = paddings.shape[1]
stride = self.filter_stride[0]
pad_len = (input_length + stride - 1) // stride * stride - input_length
out_padding = jax.lax.conv_general_dilated(
lhs=paddings[:, :, None],
rhs=jnp.ones([1, 1, 1]),
window_strides=self.filter_stride[:1],
padding=[(0, pad_len)],
dimension_numbers=('NHC', 'HIO', 'NHC'))
out_padding = jnp.squeeze(out_padding, axis=-1)
# Mask outputs by correct paddings to ensure padded elements in inputs map
# to padded value in outputs.
outputs = outputs * \
(1.0 - jnp.expand_dims(jnp.expand_dims(out_padding, -1), -1))
return outputs, out_padding
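# Worked example (illustrative): each Conv2dSubsampling uses stride (2, 2)
# with 'SAME' padding, so the time dimension shrinks to ceil(T / 2) per block.
# Stacking two of them in Subsample turns e.g. 1000 input frames into 500 and
# then 250 frames, i.e. the overall 4x subsampling referenced in the Conformer
# model below.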
class FeedForwardModule(nn.Module):
"""Feedforward block of conformer layer.
"""
config: ConformerConfig
@nn.compact
def __call__(self, inputs, padding_mask=None, train=False):
config = self.config
inputs = LayerNorm(dim=config.encoder_dim)(inputs)
inputs = nn.Dense(
config.encoder_dim * config.feed_forward_expansion_factor,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
inputs)
inputs = nn.swish(inputs)
inputs = nn.Dropout(rate=config.feed_forward_dropout_rate)(
inputs, deterministic=not train)
inputs = inputs * padding_mask
inputs = nn.Dense(
config.encoder_dim,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
inputs)
inputs = inputs * padding_mask
if config.feed_forward_residual_dropout_rate is None:
feed_forward_residual_dropout_rate = 0.1
else:
feed_forward_residual_dropout_rate = (
config.feed_forward_residual_dropout_rate)
inputs = nn.Dropout(rate=feed_forward_residual_dropout_rate)(
inputs, deterministic=not train)
return inputs
class AddPositionalEmbedding(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
Attributes:
max_len: maximum possible length for the input
posemb_init: positional embedding initializer
"""
min_timescale: int = 1
max_timescale: int = 10_000
embedding_dim: int = 512
@nn.compact
def __call__(self, seq_length):
position = jnp.arange(seq_length, dtype=jnp.float32)[jnp.newaxis, :]
num_timescales = self.embedding_dim // 2
log_timescale_increment = (
math.log(float(self.max_timescale) / float(self.min_timescale)) /
jnp.maximum(jnp.asarray(num_timescales, dtype=jnp.float32) - 1, 1))
inv_timescales = self.min_timescale * jnp.exp(
jnp.arange(num_timescales, dtype=jnp.float32) *
-log_timescale_increment)
scaled_time = (
position[:, :, jnp.newaxis] *
inv_timescales[jnp.newaxis, jnp.newaxis, :])
signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)],
axis=2).astype(jnp.float32)
# Force usage of `np` rather than `jnp` to compute static values at trace
# time.
signal = jnp.pad(signal,
[[0, 0], [0, 0], [0, np.mod(self.embedding_dim, 2)]])
return signal
# Adapted from lingvo attention layer for query scaling
# https://github.com/tensorflow/lingvo/blob/7de4ca8fff3cb28c2ecb21bbd7b02a964ce727f7/lingvo/jax/layers/attentions.py#L201
class QueryScaler(nn.Module):
"""A layer to scale individual dims of the query attention matrix."""
dim: int = 0
def setup(self):
self.scale = self.param('scale', nn.initializers.zeros, [self.dim])
@nn.compact
def __call__(self, inputs):
inputs_shape = inputs.shape
if inputs_shape[-1] != self.dim:
raise ValueError('QueryScaler expects inputs to have'
' same last dimension as scaling param.')
# 1.0/jax.nn.softplus(0.0) = 1.442695041. Hard code this number so that we
# can avoid unnecessary XLA op fusion mess on TPU.
r_softplus_0 = 1.442695041
scale = jnp.array(r_softplus_0, dtype=inputs.dtype)
scale *= jax.nn.softplus(self.scale)
return inputs * scale
# Modifying flax linen default dot product attention function to add
# query scaling, reference to original function here :
# https://github.com/google/flax/blob/a9af38085a7a49b571cf37d375060fd683e74972/flax/linen/attention.py#L121
def dot_product_attention(query,
key,
value,
bias=None,
mask=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
dtype=jnp.float32,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It's slightly modified to add query scaling.
It calculates the attention weights given query and key and combines the
values using the attention weights.
Note: query, key, value needn't have any batch dimensions.
Args:
query: queries for calculating attention with shape of
`[batch..., q_length, num_heads, qk_depth_per_head]`.
key: keys for calculating attention with shape of
`[batch..., kv_length, num_heads, qk_depth_per_head]`.
value: values to be used in attention with shape of
`[batch..., kv_length, num_heads, v_depth_per_head]`.
bias: bias for the attention weights. This should be broadcastable to the
shape `[batch..., num_heads, q_length, kv_length]`.
This can be used for incorporating causal masks, padding masks,
proximity bias, etc.
mask: mask for the attention weights. This should be broadcastable to the
shape `[batch..., num_heads, q_length, kv_length]`.
This can be used for incorporating causal masks.
Attention weights are masked out if their corresponding mask value
is `False`.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
dtype: the dtype of the computation (default: float32)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
Output of shape `[batch..., q_length, num_heads, v_depth_per_head]`.
"""
assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
'q, k, v batch dims must match.')
assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
'q, k, v num_heads must match.')
assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
# compute attention weights
query = QueryScaler(dim=query.shape[-1])(query)
attn_weights = nn.attention.dot_product_attention_weights(
query,
key,
bias,
mask,
broadcast_dropout,
dropout_rng,
dropout_rate,
deterministic,
dtype,
precision)
# return weighted sum over values for each query position
return jnp.einsum(
'...hqk,...khd->...qhd', attn_weights, value, precision=precision)
class MultiHeadedSelfAttention(nn.Module):
"""Self attention sub-layer used in the Conformer layer.
Input is first normalized using layer norm. Output is processed using
multi-headed attention.
Note: this attention implementation uses a learned scale parameter to scale
query matrix before passing it to flax attention module.
"""
config: ConformerConfig = None
@nn.compact
def __call__(self, inputs, paddings, train):
config = self.config
mask_paddings = 1 - paddings
attention_mask = nn.make_attention_mask(
mask_paddings > 0, mask_paddings > 0, dtype=jnp.float32)
inputs = LayerNorm(dim=config.encoder_dim)(inputs)
result = nn.SelfAttention(
num_heads=config.num_attention_heads,
qkv_features=config.encoder_dim,
decode=False,
dtype=config.dtype,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.zeros,
use_bias=True,
broadcast_dropout=False,
attention_fn=dot_product_attention,
dropout_rate=config.attention_dropout_rate,
deterministic=not train)(inputs, attention_mask)
if config.attention_residual_dropout_rate is None:
attention_residual_dropout_rate = 0.1
else:
attention_residual_dropout_rate = config.attention_residual_dropout_rate
result = nn.Dropout(
rate=attention_residual_dropout_rate, deterministic=not train)(
result)
return result
class BatchNorm(nn.Module):
"""Implements batch norm respecting input paddings.
This implementation takes into account input padding by masking inputs before
computing mean and variance.
This is inspired by lingvo jax implementation of BatchNorm:
https://github.com/tensorflow/lingvo/blob/84b85514d7ad3652bc9720cb45acfab08604519b/lingvo/jax/layers/normalizations.py#L92
and the corresponding defaults for momentum and epsilon have been copied over
from lingvo.
"""
config: ConformerConfig
def setup(self):
dim = self.config.encoder_dim
dtype = self.config.dtype
self.ra_mean = self.variable('batch_stats',
'mean',
lambda s: jnp.zeros(s, dtype),
dim)
self.ra_var = self.variable('batch_stats',
'var',
lambda s: jnp.ones(s, dtype),
dim)
self.gamma = self.param('scale', nn.initializers.zeros, dim, dtype)
self.beta = self.param('bias', nn.initializers.zeros, dim, dtype)
@nn.compact
def __call__(self, inputs, input_paddings, train):
rank = inputs.ndim
reduce_over_dims = list(range(0, rank - 1))
padding = jnp.expand_dims(input_paddings, -1)
momentum = self.config.batch_norm_momentum
epsilon = self.config.batch_norm_epsilon
if train:
mask = 1.0 - padding
sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=True)
count_v = jnp.sum(
jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=True)
count_v = jnp.maximum(count_v, 1.0)
mean = sum_v / count_v
sum_vv = jnp.sum(
(inputs - mean) * (inputs - mean) * mask,
axis=reduce_over_dims,
keepdims=True)
var = sum_vv / count_v
self.ra_mean.value = momentum * \
self.ra_mean.value + (1 - momentum) * mean
self.ra_var.value = momentum * \
self.ra_var.value + (1 - momentum) * var
else:
mean = self.ra_mean.value
var = self.ra_var.value
inv = (1 + self.gamma) / jnp.sqrt(var + epsilon)
bn_output = (inputs - mean) * inv + self.beta
bn_output *= 1.0 - padding
return bn_output
class ConvolutionBlock(nn.Module):
r"""Convolution block in conformer layer.
architecture:
input # (batch, time, hidden_dim)
|
layer_norm(.) # (batch, time, hidden_dim)
dense(.), dense(.) # (batch, time, 2 * hidden_dim)
| /
glu(.) # (batch, time, hidden_dim)
depthwise_conv1d(.)
batch_norm(.)
act(.)
|
dense(.)
dropout(.)
|
output
"""
config: ConformerConfig
@nn.compact
def __call__(self, inputs, input_paddings, train):
config = self.config
inputs = LayerNorm(dim=config.encoder_dim)(inputs)
input_gated1 = nn.Dense(
config.encoder_dim,
kernel_init=nn.initializers.xavier_uniform(),
use_bias=True)(
inputs)
input_gated2 = nn.Dense(
config.encoder_dim,
kernel_init=nn.initializers.xavier_uniform(),
use_bias=True)(
inputs)
inputs = input_gated1 * jax.nn.sigmoid(input_gated2)
inputs = inputs * (1 - jnp.expand_dims(input_paddings, -1))
inputs = nn.Conv(
features=config.encoder_dim,
kernel_size=(config.convolution_kernel_size,),
strides=(1,),
padding='SAME',
feature_group_count=config.encoder_dim,
use_bias=False,
kernel_init=nn.initializers.xavier_uniform())(
inputs)
inputs = BatchNorm(config)(inputs, input_paddings, train)
inputs = nn.swish(inputs)
inputs = nn.Dense(
config.encoder_dim, kernel_init=nn.initializers.xavier_uniform())(
inputs)
if config.conv_residual_dropout_rate is None:
conv_residual_dropout_rate = 0.0
else:
conv_residual_dropout_rate = config.conv_residual_dropout_rate
inputs = nn.Dropout(
rate=conv_residual_dropout_rate, deterministic=not train)(
inputs)
return inputs
class ConformerBlock(nn.Module):
"""Implements a single conformer encoder layer.
High level overview:
x = x + 0.5 * FeedForward(x)
x = x + MHSA(x)
x = x + ConvolutionBlock(x)
x = x + 0.5 * FeedForward(x)
y = layer_norm(x)
"""
config: ConformerConfig
@nn.compact
def __call__(self, inputs, input_paddings, train):
config = self.config
padding_mask = jnp.expand_dims(1 - input_paddings, -1)
inputs = inputs + 0.5 * FeedForwardModule(config=self.config)(
inputs, padding_mask, train)
inputs = inputs + MultiHeadedSelfAttention(config=self.config)(
inputs, input_paddings, train)
inputs = inputs + \
ConvolutionBlock(config)(inputs, input_paddings, train)
inputs = inputs + 0.5 * FeedForwardModule(config=self.config)(
inputs, padding_mask, train)
inputs = LayerNorm(dim=config.encoder_dim)(inputs)
return inputs
class Conformer(nn.Module):
"""Conformer (encoder + decoder) block.
Takes audio input signals and outputs probability distribution over vocab size
for each time step. The output is then fed into a CTC loss which eliminates
the need for alignment with targets.
"""
config: ConformerConfig
def setup(self):
self.specaug = spectrum_augmenter.SpecAug(
freq_mask_count=self.config.freq_mask_count,
freq_mask_max_bins=self.config.freq_mask_max_bins,
time_mask_count=self.config.time_mask_count,
time_mask_max_frames=self.config.time_mask_max_frames,
time_mask_max_ratio=self.config.time_mask_max_ratio,
time_masks_per_frame=self.config.time_masks_per_frame,
use_dynamic_time_mask_max_frames=self.config
.use_dynamic_time_mask_max_frames)
@nn.compact
def __call__(self, inputs, input_paddings, train):
config = self.config
outputs = inputs
output_paddings = input_paddings
# Compute normalized log mel spectrograms from input audio signal.
preprocessing_config = preprocessor.LibrispeechPreprocessingConfig()
outputs, output_paddings = preprocessor.MelFilterbankFrontend(
preprocessing_config,
per_bin_mean=preprocessor.LIBRISPEECH_MEAN_VECTOR,
per_bin_stddev=preprocessor.LIBRISPEECH_STD_VECTOR)(
outputs, output_paddings)
# Ablate random parts of input along temporal and frequency dimension
# following the specaug procedure in https://arxiv.org/abs/1904.08779.
if train and config.use_specaug:
outputs, output_paddings = self.specaug(outputs, output_paddings)
# Subsample input by a factor of 4 by performing strided convolutions.
if config.input_dropout_rate is None:
input_dropout_rate = 0.1
else:
input_dropout_rate = config.input_dropout_rate
outputs, output_paddings = Subsample(
encoder_dim=config.encoder_dim,
input_dropout_rate=input_dropout_rate)(
outputs, output_paddings, train)
# Run the conformer encoder layers.
for _ in range(config.num_encoder_layers):
outputs = ConformerBlock(config)(outputs, output_paddings, train)
outputs = LayerNorm(config.encoder_dim)(outputs)
# Run the decoder which in this case is a trivial projection layer.
outputs = nn.Dense(
config.vocab_size,
use_bias=True,
kernel_init=nn.initializers.xavier_uniform())(
outputs)
return outputs, output_paddings
|
import functools
import math
from typing import Dict, Iterator, Optional, Tuple
from flax import jax_utils
import flax.linen as nn
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
import torch
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.librispeech_conformer import metrics
from algorithmic_efficiency.workloads.librispeech_conformer import workload
from algorithmic_efficiency.workloads.librispeech_conformer.input_pipeline import \
LibriSpeechDataset
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax import \
models
class LibriSpeechConformerWorkload(workload.BaseLibrispeechWorkload):
def __init__(self,
tokenizer_vocab_path: Optional[str] = None,
use_specaug: bool = True) -> None:
super().__init__()
self.metrics_bundle = metrics.get_metrics_bundle(tokenizer_vocab_path)
self.use_specaug = use_specaug
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Conformer model init function.
Here we use dropout_rate as *_residual_dropout_rate, and aux_dropout_rate as
input_dropout_rate.
"""
model_config = models.ConformerConfig(
attention_residual_dropout_rate=dropout_rate,
feed_forward_residual_dropout_rate=dropout_rate,
input_dropout_rate=aux_dropout_rate,
use_specaug=self.use_specaug)
self._model = models.Conformer(model_config)
input_shape = [(320000,), (320000,)]
fake_input_batch = [np.zeros((2, *x), jnp.float32) for x in input_shape]
model_init_fn = jax.jit(functools.partial(self._model.init, train=False))
params_rng, dropout_rng = jax.random.split(rng, 2)
variables = model_init_fn({'params': params_rng, 'dropout': dropout_rng},
*fake_input_batch)
model_state, params = variables.pop('params')
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
model_state = jax_utils.replicate(model_state)
params = jax_utils.replicate(params)
return params, model_state
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_0'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
variables = {'params': params, **model_state}
inputs, input_paddings = augmented_and_preprocessed_input_batch['inputs']
is_train_mode = mode == spec.ForwardPassMode.TRAIN
if update_batch_norm or is_train_mode:
(logits, logit_paddings), new_model_state = self._model.apply(
variables,
inputs,
input_paddings,
train=True,
rngs={'dropout' : rng},
mutable=['batch_stats'])
return (logits, logit_paddings), new_model_state
else:
logits, logit_paddings = self._model.apply(
variables,
inputs,
input_paddings,
train=False,
mutable=False)
return (logits, logit_paddings), model_state
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del data_rng
del cache
del repeat_final_dataset
del num_batches
train = False
if split == 'train':
split = 'train-clean-100+train-clean-360+train-other-500'
train = True
elif split == 'eval_train':
split = 'train-clean-100+train-clean-360+train-other-500'
elif split == 'validation':
split = 'dev-clean+dev-other'
elif split == 'test':
split = 'test-clean'
ds = LibriSpeechDataset(split=split, data_dir=data_dir)
dataloader = data_utils.cycle(
torch.utils.data.DataLoader(
ds,
batch_size=global_batch_size,
shuffle=train,
sampler=None,
num_workers=4,
prefetch_factor=10,
pin_memory=False,
drop_last=train,
))
for batch in iter(dataloader):
inputs, input_paddings = batch['inputs']
targets, target_paddings = batch['targets']
numpy_batch = {
'inputs': (inputs.numpy(), input_paddings.numpy()),
'targets': (targets.numpy(), target_paddings.numpy()),
}
padded_batch = data_utils.shard_and_maybe_pad_np(
numpy_batch, padding_value=1.0, global_batch_size=global_batch_size)
yield padded_batch
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: Tuple[spec.Tensor, spec.Tensor], # (label_batch, padding)
logits_batch: Tuple[spec.Tensor, spec.Tensor], # (logits_batch, padding)
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
del label_smoothing
logits, logit_paddings = logits_batch
targets, target_paddings = label_batch
logprobs = nn.log_softmax(logits)
per_example_losses = self.ctc_loss(logprobs,
logit_paddings,
targets,
target_paddings)
# mask_batch is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
mask_batch = jnp.logical_and(mask_batch, 1 - target_paddings)
else:
mask_batch = 1 - target_paddings
n_valid_examples = jnp.maximum(mask_batch.sum(), 1)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
def ctc_loss(self,
logits: spec.Tensor,
logit_paddings: spec.Tensor,
labels: spec.Tensor,
label_paddings: spec.Tensor,
blank_id: int = 0) -> spec.Tensor:
return optax.ctc_loss(logits,
logit_paddings,
labels,
label_paddings,
blank_id)
# Adapted from lingvo's greedy decoding logic here:
# https://github.com/tensorflow/lingvo/blob/2ee26814c57b7dcead3f0382170f2f3da006f810/lingvo/jax/layers/ctc_objectives.py#L138.
def sequence_mask(self, lengths: spec.Tensor, maxlen: int) -> spec.Tensor:
batch_size = lengths.shape[0]
a = jnp.ones([batch_size, maxlen])
b = jnp.cumsum(a, axis=-1)
c = jnp.less_equal(b, lengths[:, jnp.newaxis]).astype(lengths.dtype)
return c
def collapse_and_remove_blanks(self,
labels: spec.Tensor,
seq_length: spec.Tensor,
blank_id: int = 0) -> spec.Tensor:
b, t = labels.shape
# Zap out blank.
blank_mask = 1 - jnp.equal(labels, blank_id)
labels = (labels * blank_mask).astype(labels.dtype)
# Mask labels that don't equal previous label.
label_mask = jnp.concatenate([
jnp.ones_like(labels[:, :1], dtype=jnp.int32),
jnp.not_equal(labels[:, 1:], labels[:, :-1]),
],
axis=1)
# Filter labels that aren't in the original sequence.
maxlen = labels.shape[1]
seq_mask = self.sequence_mask(seq_length, maxlen=maxlen)
label_mask = label_mask * seq_mask
# Remove repetitions from the labels.
ulabels = label_mask * labels
# Count masks for new sequence lengths.
label_mask = jnp.not_equal(ulabels, 0).astype(labels.dtype)
new_seq_len = jnp.sum(label_mask, axis=1)
# Mask indexes based on sequence length mask.
new_maxlen = maxlen
idx_mask = self.sequence_mask(new_seq_len, maxlen=new_maxlen)
# Flatten everything and mask out labels to keep and sparse indices.
flat_labels = jnp.reshape(ulabels, [-1])
flat_idx_mask = jnp.reshape(idx_mask, [-1])
indices = jnp.nonzero(flat_idx_mask, size=b * t)[0]
values = jnp.nonzero(flat_labels, size=b * t)[0]
updates = jnp.take_along_axis(flat_labels, values, axis=-1)
# Scatter to flat shape.
flat = jnp.zeros(flat_idx_mask.shape).astype(labels.dtype)
flat = flat.at[indices].set(updates)
# 0'th position in the flat array gets clobbered by later padded updates,
# so reset it here to its original value.
flat = flat.at[0].set(updates[0])
# Reshape back to square batch.
batch_size = labels.shape[0]
new_shape = [batch_size, new_maxlen]
return (jnp.reshape(flat, new_shape).astype(labels.dtype),
new_seq_len.astype(seq_length.dtype))
def greedy_decode(
self, logits: spec.Tensor,
logit_paddings: spec.Tensor) -> Tuple[spec.Tensor, spec.Tensor]:
per_frame_max = jnp.argmax(logits, axis=-1)
seqlen = jnp.sum(1.0 - logit_paddings, axis=-1)
hyp, _ = self.collapse_and_remove_blanks(per_frame_max, seqlen, blank_id=0)
hyp_paddings = jnp.equal(hyp, 0).astype(jnp.int32)
return hyp, hyp_paddings
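  # Worked example (illustrative): for an utterance whose per-frame argmax is
  # [0, 1, 1, 0, 2, 2, 0] with blank_id = 0 and no padded frames,
  # collapse_and_remove_blanks drops repeats and blanks, giving the hypothesis
  # [1, 2, 0, 0, 0, 0, 0] with a new sequence length of 2, so hyp_paddings is
  # [0, 0, 1, 1, 1, 1, 1].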
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0, 0, None),
static_broadcasted_argnums=(0,))
def eval_step_pmapped(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
(logits, logit_paddings), _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
decoded, decoded_paddings = self.greedy_decode(logits, logit_paddings)
loss = self.loss_fn(batch['targets'], (logits, logit_paddings))
targets, target_paddings = batch['targets']
return self.metrics_bundle.gather_from_model_output(
loss_dict=loss,
decoded=decoded,
decoded_paddings=decoded_paddings,
targets=targets,
target_paddings=target_paddings,
axis_name='batch')
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del global_step
if model_state is not None:
# Sync batch statistics across replicas before evaluating.
model_state = self.sync_batch_stats(model_state)
num_batches = int(math.ceil(num_examples / global_batch_size))
if split not in self._eval_iters:
self._eval_iters[split] = self._build_input_queue(
rng, split, data_dir, global_batch_size, num_batches=num_batches)
metrics_report = None
for _ in range(num_batches):
eval_batch = next(self._eval_iters[split])
computed_metrics = self.eval_step_pmapped(params,
eval_batch,
model_state,
rng).unreplicate()
if metrics_report is None:
metrics_report = computed_metrics
else:
# `merge` aggregates the metrics across batches.
metrics_report = metrics_report.merge(computed_metrics)
computed_metrics = metrics_report.compute()
return computed_metrics
def sync_batch_stats(
self, model_state: spec.ModelAuxiliaryState) -> spec.ModelAuxiliaryState:
# An axis_name is passed to pmap which can then be used by pmean.
# In this case each device has its own version of the batch statistics and
# we average them.
avg_fn = jax.pmap(lambda x: lax.pmean(x, 'x'), 'x')
new_model_state = model_state.copy(
{'batch_stats': avg_fn(model_state['batch_stats'])})
return new_model_state
|
"""Flax layer to perform preprocessing on librispeech audio inputs.
This layer computes a windowed short-time Fourier transform over audio
signals, converts it to the mel scale, and finally takes the logarithm of the
resulting mel spectrograms and normalizes them for use in speech recognition
models.
This code is based on lingvo's librispeech preprocessing code here:
https://github.com/tensorflow/lingvo/blob/master/lingvo/tasks/asr/frontend.py
"""
from typing import Any, Optional, Union
from flax import linen as nn
from flax import struct
import jax
import jax.numpy as jnp
import numpy as np
# mel spectrum constants.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
LIBRISPEECH_MEAN_VECTOR = [
-7.6047816276550293,
-7.1206226348876953,
-6.8864245414733887,
-6.8705768585205078,
-6.9667720794677734,
-7.1084094047546387,
-6.9528026580810547,
-6.783994197845459,
-6.6195521354675293,
-6.4876265525817871,
-6.4120659828186035,
-6.394047737121582,
-6.4244871139526367,
-6.3993711471557617,
-6.5158271789550781,
-6.7137999534606934,
-6.8476877212524414,
-6.9885001182556152,
-6.9221386909484863,
-7.146148681640625,
-7.2040400505065918,
-7.0537552833557129,
-7.3140382766723633,
-7.1223249435424805,
-7.30251407623291,
-7.1212143898010254,
-7.2425732612609863,
-7.1730537414550781,
-7.0979413986206055,
-7.088747501373291,
-6.9849910736083984,
-6.8787732124328613,
-6.7602753639221191,
-6.6300945281982422,
-6.5145769119262695,
-6.4245057106018066,
-6.356513500213623,
-6.31787633895874,
-6.2660770416259766,
-6.2468328475952148,
-6.2821526527404785,
-6.1908388137817383,
-6.2484354972839355,
-6.1472640037536621,
-6.0924725532531738,
-6.0171003341674805,
-5.9250402450561523,
-5.8535833358764648,
-5.8209109306335449,
-5.8118929862976074,
-5.80783748626709,
-5.7714629173278809,
-5.7453732490539551,
-5.7705655097961426,
-5.7765641212463379,
-5.7831673622131348,
-5.7954087257385254,
-5.7994823455810547,
-5.8023476600646973,
-5.8047118186950684,
-5.8168182373046875,
-5.8844799995422363,
-5.9727106094360352,
-6.0444660186767578,
-6.1284866333007812,
-6.2257585525512695,
-6.3157496452331543,
-6.39061164855957,
-6.4928598403930664,
-6.5498456954956055,
-6.6054320335388184,
-6.6508378982543945,
-6.66917610168457,
-6.6726889610290527,
-6.684234619140625,
-6.6974577903747559,
-6.75471830368042,
-6.7949142456054688,
-6.8634209632873535,
-6.94186544418335
]
LIBRISPEECH_STD_VECTOR = [
3.4353282451629639,
3.5962932109832764,
3.7012472152709961,
3.7369205951690674,
3.7535104751586914,
3.693629264831543,
3.6922497749328613,
3.7641522884368896,
3.8419716358184814,
3.8999848365783691,
3.9294240474700928,
3.9317409992218018,
3.9139585494995117,
3.9031598567962646,
3.8691999912261963,
3.8155081272125244,
3.7644970417022705,
3.7099106311798096,
3.6965086460113525,
3.6003766059875488,
3.5493226051330566,
3.5465121269226074,
3.45003604888916,
3.4712812900543213,
3.4084610939025879,
3.4408135414123535,
3.4104881286621094,
3.4217638969421387,
3.4312851428985596,
3.4199209213256836,
3.4305806159973145,
3.4382665157318115,
3.4580366611480713,
3.4817991256713867,
3.4958710670471191,
3.5036792755126953,
3.5047574043273926,
3.4988734722137451,
3.493056058883667,
3.4822943210601807,
3.459430456161499,
3.4612770080566406,
3.4559063911437988,
3.4755423069000244,
3.4971549510955811,
3.5326557159423828,
3.5705199241638184,
3.5920312404632568,
3.596907377243042,
3.5913500785827637,
3.5865931510925293,
3.5826809406280518,
3.5837743282318115,
3.5895791053771973,
3.5819313526153564,
3.5837869644165039,
3.5861184597015381,
3.5889589786529541,
3.592214822769165,
3.5939455032348633,
3.5856630802154541,
3.5884113311767578,
3.5921022891998291,
3.5870490074157715,
3.5806570053100586,
3.5731067657470703,
3.5617532730102539,
3.54980731010437,
3.5527374744415283,
3.5475366115570068,
3.5387849807739258,
3.5256178379058838,
3.5031836032867432,
3.4922726154327393,
3.4879646301269531,
3.4725594520568848,
3.4558389186859131,
3.4351828098297119,
3.4284293651580811,
3.4299170970916748
]
@struct.dataclass
class LibrispeechPreprocessingConfig:
"""Config to hold all preprocessing options for librispeech dataset."""
sample_rate: float = 16000.0
frame_size_ms: float = 25.0
frame_step_ms: float = 10.0
compute_energy: bool = True
window_fn: str = 'HANNING'
output_log_floor: float = 1.0
pad_end: bool = False
preemph: float = 0.97
preemph_htk_flavor: bool = True
noise_scale: float = 0.0
num_bins: int = 80
lower_edge_hertz: float = 125.0
upper_edge_hertz: float = 7600.0
fft_overdrive: bool = False
output_floor: float = 0.000010
def _hertz_to_mel(frequencies_hertz):
"""Convert hertz to mel."""
return _MEL_HIGH_FREQUENCY_Q * jnp.log(1.0 + (frequencies_hertz /
_MEL_BREAK_FREQUENCY_HERTZ))
def _pad_end_length(num_timesteps, frame_step, frame_size):
"""Returns how many sample needed to be padded for pad_end feature."""
# The number of frames that can be extracted from the signal.
num_frames = int(np.ceil(num_timesteps / frame_step))
# Signal length required for computing `num_frames` frames.
padded_length = frame_step * (num_frames - 1) + frame_size
return padded_length - num_timesteps
def frame(x,
frame_length: int,
frame_step: int,
pad_end: bool = False,
pad_value: Union[int, float] = 0.0):
"""Slides a window and extract values.
  This function extracts `x[:, n:n+frame_length, :]` for `n` sliding with a
  stride of `frame_step`, and returns an array `y` with the shape
`(batch_size, num_frames, frame_length, num_channels)`. Unlike the
counterpart in Tensorflow (`tf.signal.frame`), this function currently does
not take `axis` argument, and the input tensor `x` is expected to have a
shape of `(batch_size, timesteps, channels)`.
Args:
x: An input array with `(batch_size, timesteps, channels)`-shape.
frame_length: The frame length.
frame_step: The frame hop size.
pad_end: If True, the end of signal is padded so the window can continue
sliding while the starting point of the window is in the valid range.
pad_value: A scalar used as a padding value when `pad_end` is True.
Returns:
    A tensor with shape `(batch_size, num_frames, frame_length, num_channels)`.
"""
_, num_timesteps, num_channels = x.shape
if pad_end:
num_extends = _pad_end_length(num_timesteps, frame_step, frame_length)
x = jnp.pad(
x, ((0, 0), (0, num_extends), (0, 0)),
'constant',
constant_values=pad_value)
flat_y = jax.lax.conv_general_dilated_patches(
x, (frame_length,), (frame_step,),
'VALID',
dimension_numbers=('NTC', 'OIT', 'NTC'))
ret = flat_y.reshape(flat_y.shape[:-1] + (num_channels, frame_length))
return ret.transpose((0, 1, 3, 2))
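# Illustrative usage sketch (hypothetical helper, not part of the original
# module): frame one second of 16 kHz audio into overlapping 400-sample
# windows with a 160-sample hop and check the expected output shape.
def _example_frame_usage():
  x = jnp.ones((2, 16000, 1))  # (batch_size, timesteps, channels)
  frames = frame(x, frame_length=400, frame_step=160, pad_end=False)
  # With 'VALID' framing: (16000 - 400) // 160 + 1 = 98 frames per example.
  assert frames.shape == (2, 98, 400, 1)
  return frames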
def linear_to_mel_weight_matrix(num_mel_bins: int = 20,
num_spectrogram_bins: int = 129,
sample_rate: Union[int, float] = 8000,
lower_edge_hertz: Union[int, float] = 125.0,
upper_edge_hertz: Union[int, float] = 3800.0,
dtype: Any = jnp.float32):
r"""Jax-port of `tf.signal.linear_to_mel_weight_matrix`.
Args:
num_mel_bins: Python int. How many bands in the resulting mel spectrum.
num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
source spectrogram data, which is understood to be `fft_size // 2 + 1`,
i.e. the spectrogram only contains the nonredundant FFT bins.
sample_rate: An integer or float `Tensor`. Samples per second of the input
signal used to create the spectrogram. Used to figure out the frequencies
corresponding to each spectrogram bin, which dictates how they are mapped
into the mel scale.
lower_edge_hertz: Python float. Lower bound on the frequencies to be
included in the mel spectrum. This corresponds to the lower edge of the
lowest triangular band.
upper_edge_hertz: Python float. The desired top edge of the highest
frequency band.
dtype: The `DType` of the result matrix. Must be a floating point type.
Returns:
An array of shape `[num_spectrogram_bins, num_mel_bins]`.
Raises:
ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
ordered, `upper_edge_hertz` is larger than the Nyquist frequency.
[mel]: https://en.wikipedia.org/wiki/Mel_scale
"""
# Input validator from tensorflow/python/ops/signal/mel_ops.py#L71
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if lower_edge_hertz < 0.0:
raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got %s for sample_rate: %s' %
(upper_edge_hertz, sample_rate))
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = jnp.linspace(
0.0, nyquist_hertz, num_spectrogram_bins, dtype=dtype)[bands_to_zero:]
spectrogram_bins_mel = _hertz_to_mel(linear_frequencies)[:, jnp.newaxis]
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
edges = jnp.linspace(
_hertz_to_mel(lower_edge_hertz),
_hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2,
dtype=dtype)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel = edges[:-2][jnp.newaxis, :]
center_mel = edges[1:-1][jnp.newaxis, :]
upper_edge_mel = edges[2:][jnp.newaxis, :]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = jnp.maximum(0.0, jnp.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
return jnp.pad(mel_weights_matrix, [[bands_to_zero, 0], [0, 0]])
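# Illustrative sketch (hypothetical helper): build a mel weight matrix for an
# assumed fft_size of 512 and project a magnitude spectrum onto 80 mel bins.
def _example_mel_matrix_usage():
  num_spectrogram_bins = 512 // 2 + 1  # fft_size // 2 + 1 = 257
  mel_weights = linear_to_mel_weight_matrix(
      num_mel_bins=80,
      num_spectrogram_bins=num_spectrogram_bins,
      sample_rate=16000,
      lower_edge_hertz=125.0,
      upper_edge_hertz=7600.0)
  assert mel_weights.shape == (num_spectrogram_bins, 80)
  spectrum = jnp.ones((2, 98, num_spectrogram_bins))  # (batch, frames, bins)
  return jnp.einsum('btf,fn->btn', spectrum, mel_weights)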
def _hanning_greco(win_support, frame_size, dtype):
"""Add a greco-style hanning window to the graph.
Note that the Hanning window in Wikipedia is not the same as the Hanning
window in Greco. The Greco3 Hanning window at 0 is NOT 0, as the wikipedia
page would indicate. Talkin's explanation was that it was like wasting two
samples to have the values at the edge of the window to be 0.0 exactly.
Args:
win_support: Number of samples for non-zero support in the window
frame_size: Total size of the window (frame_size >= win_support)
    dtype: The data type of the window.
Returns:
Tensor of size frame_size with the window to apply.
"""
if frame_size < win_support:
raise ValueError(
'Provided frame_size = {} is lower than win_support = {}'.format(
frame_size, win_support))
arg = jnp.pi * 2.0 / (win_support)
hann = 0.5 - (0.5 * jnp.cos(arg *
(jnp.arange(win_support, dtype=dtype) + 0.5)))
zero_size = frame_size - win_support
return jnp.pad(hann, [(0, zero_size)])
def _next_pow_of_two(x: Union[int, float]) -> int:
return int(2**np.ceil(np.log2(x)))
class SpectrogramFrontend(nn.Module):
"""Layer to convert input audio signals from time domain to frequency domain.
"""
config: LibrispeechPreprocessingConfig = None
input_scale_factor: float = 1.0
output_log: bool = False
def setup(self) -> None:
p = self.config
self._frame_step = int(round(p.sample_rate * p.frame_step_ms / 1000.0))
self._frame_size = int(round(
p.sample_rate * p.frame_size_ms / 1000.0)) + 1 # +1 for the preemph
    # The TF version has a maximum of 512, but it's not always necessary.
self.fft_size = _next_pow_of_two(self._frame_size)
if p.window_fn is None:
self._window_fn = None
elif p.window_fn.upper() == 'HANNING':
def _hanning_window(frame_size, dtype):
# Preparing 1-point longer window to follow TF's definition
if frame_size % 2 == 0:
# simulate periodic=True in tf.signal.hann_window
return jnp.hanning(frame_size + 1).astype(dtype)[:-1]
else:
return jnp.hanning(frame_size).astype(dtype)
self._window_fn = _hanning_window
elif p.window_fn.upper() == 'HANNING_GRECO':
# Greco-compatible hanning window
def f(frame_size, dtype):
return _hanning_greco(self._frame_size - 1, frame_size, dtype)
self._window_fn = f
else:
raise ValueError('Illegal value %r for window_fn param' % p.window_fn)
def _apply_preemphasis(self, framed_signal):
p = self.config
if p.preemph_htk_flavor:
return jnp.concatenate([
framed_signal[:, :, :1, :] * (1. - p.preemph),
(framed_signal[:, :, 1:-1, :] -
p.preemph * framed_signal[:, :, :-2, :])
],
axis=2)
else:
return (framed_signal[:, :, 1:, :] -
p.preemph * framed_signal[:, :, :-1, :])
def fprop_paddings(self, input_paddings):
p = self.config
if p.pad_end:
num_extends = _pad_end_length(input_paddings.shape[1],
self._frame_step,
self._frame_size)
input_paddings = jnp.pad(
input_paddings, ((0, 0), (0, num_extends)), constant_values=1.0)
return jax.lax.reduce_window(
input_paddings,
init_value=1.0,
computation=jax.lax.min,
window_dimensions=[1, self._frame_size],
window_strides=[1, self._frame_step],
padding='valid')
def next_prng_key(self, name='dropout'):
return self.make_rng(name)
@nn.compact
def __call__(self, inputs, input_paddings):
inputs = inputs.astype(jnp.float32)
p = self.config
# Expand to have a channel axis
if inputs.ndim == 2:
inputs = jnp.expand_dims(inputs, -1)
output_paddings = None
if input_paddings is not None:
inputs = inputs * jnp.expand_dims(1.0 - input_paddings, -1)
output_paddings = self.fprop_paddings(input_paddings)
else:
output_paddings = None
pcm_audio_chunk = inputs.astype(jnp.float32) * self.input_scale_factor
framed_signal = frame(
pcm_audio_chunk, self._frame_size, self._frame_step, pad_end=p.pad_end)
if p.preemph != 0.0:
preemphasized = self._apply_preemphasis(framed_signal)
else:
preemphasized = framed_signal[..., :-1, :]
if p.noise_scale > 0.0:
noise_signal = jax.random.normal(self.next_prng_key(),
preemphasized.shape) * p.noise_scale
else:
noise_signal = jnp.zeros(preemphasized.shape)
windowed_signal = preemphasized + noise_signal
# Window here
if self._window_fn is not None:
window = self._window_fn(self._frame_size - 1, framed_signal.dtype)
window = window.reshape((1, 1, self._frame_size - 1, 1))
windowed_signal *= window
spectrum = jnp.fft.rfft(windowed_signal, self.fft_size, axis=2)
spectrum = jnp.abs(spectrum)
if p.compute_energy:
spectrum = spectrum**2.0
outputs = spectrum
if self.output_log:
outputs = jnp.log(jnp.maximum(outputs, p.output_log_floor))
return outputs, output_paddings
class MelFilterbankFrontend(nn.Module):
"""Layer to compute log mel spectograms from input audio signals.
"""
config: LibrispeechPreprocessingConfig = None
use_divide_stream: bool = True
per_bin_mean: Optional[float] = None
per_bin_stddev: Optional[float] = None
def setup(self):
p = self.config
input_scale_factor = 2**-15 if self.use_divide_stream else 1.0
self.stft = SpectrogramFrontend(
p, input_scale_factor=input_scale_factor, output_log=False)
if self.per_bin_mean is None:
per_bin_mean = [0.0] * p.num_bins
else:
per_bin_mean = self.per_bin_mean
if self.per_bin_stddev is None:
per_bin_stddev = [1.0] * p.num_bins
else:
per_bin_stddev = self.per_bin_stddev
self._normalizer_mean = jnp.array(per_bin_mean)[
jnp.newaxis, jnp.newaxis, :, jnp.newaxis]
self._normalizer_stddev = jnp.array(per_bin_stddev)[
jnp.newaxis, jnp.newaxis, :, jnp.newaxis]
@nn.compact
def __call__(self, inputs, input_paddings):
p = self.config
spect, spect_paddings = self.stft(inputs, input_paddings)
mel_weights = linear_to_mel_weight_matrix(
num_mel_bins=p.num_bins,
num_spectrogram_bins=spect.shape[2],
sample_rate=p.sample_rate,
lower_edge_hertz=p.lower_edge_hertz,
upper_edge_hertz=p.upper_edge_hertz)
mel_spectrogram = jnp.einsum('fn,btfc->btnc', mel_weights, spect)
logmel_spectrogram = jnp.log(jnp.maximum(mel_spectrogram, p.output_floor))
normalized_logmel_spectrogram = (
(logmel_spectrogram - self._normalizer_mean) / self._normalizer_stddev)
normalized_logmel_spectrogram = jnp.squeeze(normalized_logmel_spectrogram,
-1)
return normalized_logmel_spectrogram, spect_paddings
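# Illustrative end-to-end sketch (hypothetical, not part of the original
# module): initialize the Flax frontend and compute normalized log-mel
# features for a batch of raw waveforms. Names and shapes are assumptions.
def _example_melfilterbank_usage():
  config = LibrispeechPreprocessingConfig()
  frontend = MelFilterbankFrontend(config)
  audio = jnp.zeros((2, 16000))      # (batch, samples), one second at 16 kHz
  paddings = jnp.zeros((2, 16000))   # 0.0 = valid sample, 1.0 = padding
  variables = frontend.init(jax.random.PRNGKey(0), audio, paddings)
  features, feature_paddings = frontend.apply(variables, audio, paddings)
  # features has shape (batch, num_frames, num_bins).
  return features, feature_paddings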
|
"""This is a pytorch implementation mirroring:
https://github.com/google/init2winit/blob/master/init2winit/model_lib/spectrum_augmenter.py.
"""
import torch
from torch import nn
class SpecAug(nn.Module):
"""Layer performs masking prodecure along time and frequency axis.
The procedure is detailed in https://arxiv.org/abs/1904.08779.
This is an essential component in speech recognition models that
helps achieve better word error rates.
"""
def __init__(self,
freq_mask_count: int = 1,
freq_mask_max_bins: int = 15,
time_mask_count: int = 1,
time_mask_max_frames: int = 50,
time_mask_max_ratio: float = 1.0,
time_masks_per_frame: float = 0.0,
use_dynamic_time_mask_max_frames: bool = False):
super().__init__()
self.freq_mask_count = freq_mask_count
self.freq_mask_max_bins = freq_mask_max_bins
self.time_mask_count = time_mask_count
self.time_mask_max_frames = time_mask_max_frames
self.time_mask_max_ratio = time_mask_max_ratio
self.time_masks_per_frame = time_masks_per_frame
self.use_dynamic_time_mask_max_frames = use_dynamic_time_mask_max_frames
  # Note: unlike the JAX version, torch modules have no make_rng(); this
  # helper is unused in this implementation.
  def next_prng_key(self, name='dropout'):
    return self.make_rng(name)
def _get_mask(self,
batch_size,
choose_range,
mask_size,
max_length=None,
masks_per_frame=0.0,
multiplicity=1,
max_ratio=1.0,
device='cpu'):
# Sample lengths for multiple masks.
if max_length and max_length > 0:
max_length = max_length * torch.ones(batch_size, device=device)
else:
max_length = choose_range * max_ratio
masked_portion = torch.rand(batch_size, multiplicity, device=device)
masked_frame_size = torch.einsum('b,bm->bm', max_length,
masked_portion).long()
    # Make sure the sampled length is smaller than max_ratio * length_bound.
    # Note that sampling in this way is biased
    # (shorter sequences may be over-masked).
choose_range = torch.tile(choose_range[:, None], [1, multiplicity])
length_bound = (max_ratio * choose_range).long()
length = torch.minimum(masked_frame_size, length_bound.clamp(min=1))
# Choose starting point.
random_start = torch.rand(batch_size, multiplicity, device=device)
start_with_in_valid_range = random_start * (choose_range - length + 1)
start = start_with_in_valid_range.long()
end = start + length - 1
# Shift starting and end point by small value.
delta = 0.1
start = (start - delta)[..., None]
start = torch.tile(start, [1, 1, mask_size])
end = (end + delta)[..., None]
end = torch.tile(end, [1, 1, mask_size])
# Construct pre-mask of shape (batch_size, multiplicity, mask_size).
diagonal = torch.arange(mask_size, device=device).reshape(1, 1, -1)
diagonal = torch.tile(diagonal, [batch_size, multiplicity, 1])
pre_mask = torch.minimum(diagonal < end, diagonal > start)
# Sum masks with appropriate multiplicity.
if masks_per_frame > 0:
multiplicity_weights = torch.tile(
torch.arange(multiplicity, device=device).long()[None, ...],
[batch_size, 1])
multiplicity_tensor = masks_per_frame * choose_range
multiplicity_weights = (multiplicity_weights < multiplicity_tensor).long()
pre_mask = torch.einsum('bmt,bm->bt', pre_mask, multiplicity_weights)
else:
pre_mask = torch.einsum('bmt->bt', pre_mask)
mask = 1.0 - (pre_mask > 0).long()
return mask
def _time_mask(self, inputs, length):
# Get time masking parameters.
time_mask_max_frames = self.time_mask_max_frames
use_dynamic_time_mask_max_frames = self.use_dynamic_time_mask_max_frames
multiplicity = self.time_mask_count
max_ratio = self.time_mask_max_ratio
# If maximum mask length is zero, do nothing.
if ((time_mask_max_frames == 0 and not use_dynamic_time_mask_max_frames) or
max_ratio <= 0.0):
return inputs
if multiplicity == 0:
return inputs
batch_size, time_length, _ = inputs.shape
# When using dynamic time mask size, discard upper-bound on
# maximum allowed frames for time mask.
if use_dynamic_time_mask_max_frames:
time_mask_max_frames = None
# Create masks in time direction and apply.
block_arrays = self._get_mask(
batch_size,
choose_range=length,
mask_size=time_length,
max_length=time_mask_max_frames,
masks_per_frame=self.time_masks_per_frame,
multiplicity=multiplicity,
max_ratio=max_ratio,
device=inputs.device)
outputs = torch.einsum('bxy,bx->bxy', inputs, block_arrays)
return outputs
def _frequency_mask(self, inputs):
# Mask parameters.
freq_mask_max_bins = self.freq_mask_max_bins
multiplicity = self.freq_mask_count
# If masking length or count is zero, do nothing.
if freq_mask_max_bins == 0 or multiplicity == 0:
return inputs
# Arguments to pass to mask generator.
batch_size, _, num_freq = inputs.shape
choose_range = num_freq * torch.ones(batch_size, device=inputs.device)
# Create masks in frequency direction and apply.
block_arrays = self._get_mask(
batch_size,
choose_range=choose_range,
mask_size=num_freq,
max_length=freq_mask_max_bins,
masks_per_frame=0.0,
multiplicity=multiplicity,
max_ratio=1.0,
device=inputs.device)
outputs = torch.einsum('bxy,by->bxy', inputs, block_arrays)
return outputs
def forward(self, inputs, paddings):
lengths = torch.einsum('bh->b', 1 - paddings).long()
inputs = self._time_mask(inputs, lengths)
inputs = self._frequency_mask(inputs)
return inputs, paddings
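# Illustrative sketch (hypothetical, not part of the original module): apply
# SpecAug to a batch of log-mel features; `paddings` marks padded frames with
# 1.0 and valid frames with 0.0. All shapes here are assumptions.
def _example_specaug_usage():
  specaug = SpecAug(
      freq_mask_count=2,
      freq_mask_max_bins=27,
      time_mask_count=10,
      time_mask_max_frames=40,
      time_mask_max_ratio=0.05,
      use_dynamic_time_mask_max_frames=True)
  features = torch.randn(4, 100, 80)  # (batch, frames, mel bins)
  paddings = torch.zeros(4, 100)
  masked_features, _ = specaug(features, paddings)
  return masked_features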
|
"""This is a pytorch implementation mirroring:
https://github.com/google/init2winit/blob/master/init2winit/model_lib/conformer.py.
"""
from dataclasses import dataclass
import math
from typing import Tuple
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch import \
preprocessor
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.spectrum_augmenter import \
SpecAug
@dataclass
class ConformerConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int = 1024
encoder_dim: int = 512
num_attention_heads: int = 8
num_encoder_layers: int = 4
attention_dropout_rate: float = 0.0
attention_residual_dropout_rate: float = 0.1
conv_residual_dropout_rate: float = 0.0
feed_forward_dropout_rate: float = 0.0
feed_forward_residual_dropout_rate: float = 0.1
convolution_kernel_size: int = 5
feed_forward_expansion_factor: int = 4
freq_mask_count: int = 2
freq_mask_max_bins: int = 27
time_mask_count: int = 10
time_mask_max_frames: int = 40
time_mask_max_ratio: float = 0.05
time_masks_per_frame: float = 0.0
use_dynamic_time_mask_max_frames: bool = True
input_dropout_rate: float = 0.1
batch_norm_momentum: float = 0.999
batch_norm_epsilon: float = 0.001
use_specaug: bool = True
def initialize(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv1d):
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.MultiheadAttention):
init.xavier_uniform_(m.in_proj_weight)
for i in m.children():
initialize(i)
class LayerNorm(nn.Module):
def __init__(self, dim, epsilon=1e-6):
super().__init__()
self.dim = dim
    # Both parameters are zero-initialized; forward() uses (1 + scale) as the
    # effective weight, so the layer starts out as plain normalization.
    self.scale = nn.Parameter(torch.zeros(self.dim))
    self.bias = nn.Parameter(torch.zeros(self.dim))
self.epsilon = epsilon
def forward(self, x):
return F.layer_norm(x, (self.dim,), 1 + self.scale, self.bias, self.epsilon)
class Subsample(nn.Module):
def __init__(self, encoder_dim: int = 0, input_dropout_rate: float = 0.0):
super().__init__()
self.encoder_dim = encoder_dim
self.input_dropout_rate = input_dropout_rate
self.conv1 = Conv2dSubsampling(
input_channels=1, output_channels=encoder_dim)
self.conv2 = Conv2dSubsampling(
input_channels=encoder_dim, output_channels=encoder_dim)
self.linear = nn.LazyLinear(out_features=self.encoder_dim, bias=True)
self.pos_encode = AddPositionalEmbedding(embedding_dim=self.encoder_dim)
self.dropout = nn.Dropout(p=self.input_dropout_rate)
def forward(self, inputs, input_paddings):
output_paddings = input_paddings
outputs = inputs[:, None, :, :]
outputs, output_paddings = self.conv1(outputs, output_paddings)
outputs, output_paddings = self.conv2(outputs, output_paddings)
batch_size, channels, subsampled_lengths, subsampled_dims = outputs.shape
outputs = outputs.permute(0, 2, 3, 1).reshape(batch_size,
subsampled_lengths,
subsampled_dims * channels)
outputs = self.linear(outputs)
outputs = outputs + self.pos_encode(seq_length=outputs.shape[1])
outputs = self.dropout(outputs)
return outputs, output_paddings
class Conv2dSubsampling(nn.Module):
def __init__(self,
input_channels: int,
output_channels: int,
filter_stride: Tuple[int] = (2, 2),
padding: str = 'SAME'):
super().__init__()
self.input_channels = input_channels
self.output_channels = output_channels
self.filter_stride = filter_stride
self.padding = padding
self.filter_shape = (output_channels, input_channels, 3, 3)
self.kernel = nn.Parameter(
torch.nn.init.xavier_uniform_(torch.empty(*self.filter_shape)))
self.bias = nn.Parameter(torch.zeros(output_channels))
def get_same_padding(self, input_shape):
in_height, in_width = input_shape[2:]
stride_height, stride_width = self.filter_stride
filter_height, filter_width = 3, 3
if in_height % stride_height == 0:
pad_along_height = max(filter_height - stride_height, 0)
else:
pad_along_height = max(filter_height - (in_height % stride_height), 0)
if in_width % stride_width == 0:
pad_along_width = max(filter_width - stride_width, 0)
else:
pad_along_width = max(filter_width - (in_width % stride_width), 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return (pad_left, pad_right, pad_top, pad_bottom)
def forward(self, inputs, paddings):
groups = inputs.shape[1] // self.input_channels
if self.padding == 'SAME':
in_ = F.pad(inputs, self.get_same_padding(inputs.shape))
else:
in_ = inputs
outputs = F.conv2d(
input=in_,
weight=self.kernel,
bias=self.bias,
stride=self.filter_stride,
dilation=(1, 1),
groups=groups)
outputs = F.relu(outputs)
input_length = paddings.shape[1]
stride = self.filter_stride[0]
pad_len = (input_length + stride - 1) // stride * stride - input_length
padded_paddings = torch.cat([
paddings[:, None, :],
torch.zeros(
size=(paddings.shape[0], 1, pad_len), device=paddings.device)
],
dim=2)
out_padding = F.conv1d(
input=padded_paddings,
weight=torch.ones([1, 1, 1], device=paddings.device),
stride=self.filter_stride[:1])
out_padding = out_padding.squeeze(dim=1)
outputs = outputs * (1 - out_padding[:, None, :, None])
return outputs, out_padding
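# Illustrative sketch (hypothetical): a single Conv2dSubsampling halves both
# the time and frequency axes with stride (2, 2) and 'SAME' padding, and
# returns a correspondingly subsampled padding vector.
def _example_conv2d_subsampling():
  layer = Conv2dSubsampling(input_channels=1, output_channels=8)
  features = torch.randn(2, 1, 100, 80)  # (batch, channels, time, freq)
  paddings = torch.zeros(2, 100)         # 0 = valid frame, 1 = padding
  outputs, out_paddings = layer(features, paddings)
  assert outputs.shape == (2, 8, 50, 40)
  assert out_paddings.shape == (2, 50)
  return outputs, out_paddings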
class FeedForwardModule(nn.Module):
def __init__(self, config: ConformerConfig):
super().__init__()
self.config = config
self.ln = LayerNorm(dim=config.encoder_dim)
self.linear1 = nn.LazyLinear(
out_features=config.encoder_dim * config.feed_forward_expansion_factor,
bias=True)
self.dropout1 = nn.Dropout(p=config.feed_forward_dropout_rate)
self.linear2 = nn.LazyLinear(out_features=config.encoder_dim, bias=True)
if config.feed_forward_residual_dropout_rate is None:
feed_forward_residual_dropout_rate = 0.1
else:
feed_forward_residual_dropout_rate = (
config.feed_forward_residual_dropout_rate)
self.dropout2 = nn.Dropout(p=feed_forward_residual_dropout_rate)
def forward(self, inputs, padding_mask):
inputs = self.ln(inputs)
inputs = self.linear1(inputs)
inputs = F.silu(inputs)
inputs = self.dropout1(inputs)
inputs = inputs * padding_mask
inputs = self.linear2(inputs)
inputs = inputs * padding_mask
inputs = self.dropout2(inputs)
return inputs
class AddPositionalEmbedding(nn.Module):
def __init__(self,
min_timescale: int = 1,
max_timescale: int = 10_000,
embedding_dim: int = 512):
super().__init__()
self.min_timescale = min_timescale
self.max_timescale = max_timescale
self.embedding_dim = embedding_dim
num_timescales = self.embedding_dim // 2
log_timescale_increment = math.log(
float(self.max_timescale) / float(self.min_timescale)) / (
num_timescales - 1)
inv_timescales = self.min_timescale * \
torch.exp(torch.arange(num_timescales, dtype=torch.float32)
* -log_timescale_increment)
self.register_buffer('inv_timescales', inv_timescales[None, None, :])
def forward(self, seq_length):
position = torch.arange(
end=seq_length, dtype=torch.float32, device=self.inv_timescales.device)
scaled_time = position[None, :, None] * self.inv_timescales
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)
if self.embedding_dim % 2:
signal = torch.cat(
[signal, torch.zeros(signal.shape[0], signal.shape[1], 1)], dim=2)
return signal
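# Illustrative sketch (hypothetical): the positional embedding depends only on
# the sequence length and returns a (1, seq_length, embedding_dim) signal that
# is broadcast-added to the subsampled features.
def _example_positional_embedding():
  pos = AddPositionalEmbedding(embedding_dim=512)
  signal = pos(seq_length=25)
  assert signal.shape == (1, 25, 512)
  return signal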
class QueryScaler(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
self.scale = nn.Parameter(torch.zeros(self.dim))
def forward(self, inputs):
    # r_softplus_0 = 1 / softplus(0.0); since self.scale is zero-initialized,
    # the effective scale starts at exactly 1.0 (an identity scaling).
    r_softplus_0 = 1.442695041
    scale = r_softplus_0 * F.softplus(self.scale)
return inputs * scale
class MHSAwithQS(nn.MultiheadAttention):
# pylint: disable=locally-disabled, use-a-generator, line-too-long, invalid-name
def __init__(self, config: ConformerConfig):
super().__init__(
embed_dim=config.encoder_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout_rate,
bias=True,
batch_first=True)
self.qs = QueryScaler(dim=config.encoder_dim // config.num_attention_heads)
def _scaled_in_proj_weight(self):
# Scale the query projection weight.
qs_input = self.in_proj_weight[:self.embed_dim].view(
self.num_heads, self.embed_dim // self.num_heads, -1).transpose(1, 2)
in_proj_queryW_scaled = self.qs(qs_input).transpose(
1, 2).view(*self.in_proj_weight[:self.embed_dim].shape)
in_proj_weight = torch.cat(
[in_proj_queryW_scaled, self.in_proj_weight[self.embed_dim:]])
return in_proj_weight
def _scaled_in_proj_bias(self):
# Scale the query bias.
in_proj_queryb_scaled = self.qs(self.in_proj_bias[:self.embed_dim].view(
self.num_heads, self.embed_dim // self.num_heads)).view(-1)
in_proj_bias = torch.cat(
[in_proj_queryb_scaled, self.in_proj_bias[self.embed_dim:]])
return in_proj_bias
def forward(self,
query,
key,
value,
key_padding_mask=None,
need_weights: bool = True,
attn_mask=None,
average_attn_weights: bool = True):
r"""
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and byte masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
is_batched = query.dim() == 3
if key_padding_mask is not None:
_kpm_dtype = key_padding_mask.dtype
if _kpm_dtype != torch.bool and not torch.is_floating_point(
key_padding_mask):
raise AssertionError(
"only bool and floating types of key_padding_mask are supported")
why_not_fast_path = ''
if not is_batched:
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
elif query is not key or key is not value:
# When lifting this restriction, don't forget to either
# enforce that the dtypes all match or test cases where
# they don't!
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
elif self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype:
# this case will fail anyway, but at least they'll get a useful error message.
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
elif self.training:
why_not_fast_path = "training is enabled"
elif not self.batch_first:
why_not_fast_path = "batch_first was not True"
elif self.bias_k is not None:
why_not_fast_path = "self.bias_k was not None"
elif self.bias_v is not None:
why_not_fast_path = "self.bias_v was not None"
elif self.dropout:
why_not_fast_path = f"dropout was {self.dropout}, required zero"
elif self.add_zero_attn:
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
elif attn_mask is not None:
why_not_fast_path = "attn_mask was not None"
elif query.is_nested and key_padding_mask is not None:
why_not_fast_path = "key_padding_mask is not supported with NestedTensor input"
elif self.num_heads % 2 == 1:
why_not_fast_path = "num_heads is odd"
elif torch.is_autocast_enabled():
why_not_fast_path = "autocast is enabled"
if not why_not_fast_path:
tensor_args = (
query,
key,
value,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_fast_path = "some Tensor argument has_torch_function"
elif not all([(x is None or x.is_cuda or 'cpu' in str(x.device))
for x in tensor_args]):
why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any(
[x is not None and x.requires_grad for x in tensor_args]):
why_not_fast_path = (
"grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad")
if not why_not_fast_path:
# Scale the query bias parameter and the query projection weight.
in_proj_weight = self._scaled_in_proj_weight()
in_proj_bias = self._scaled_in_proj_bias()
return torch._native_multi_head_attention(
query,
key,
value,
self.embed_dim,
self.num_heads,
in_proj_weight,
in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
key_padding_mask if key_padding_mask is not None else attn_mask,
need_weights,
average_attn_weights,
1 if key_padding_mask is not None else
0 if attn_mask is not None else None)
any_nested = query.is_nested or key.is_nested or value.is_nested
assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
f"The fast path was not hit because {why_not_fast_path}")
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, average_attn_weights=average_attn_weights)
else:
# Scale the query bias parameter and the query projection weight.
in_proj_weight = self._scaled_in_proj_weight()
in_proj_bias = self._scaled_in_proj_bias()
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
in_proj_weight, in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, average_attn_weights=average_attn_weights)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
class MultiHeadedSelfAttention(nn.Module):
def __init__(self, config: ConformerConfig):
super().__init__()
self.config = config
self.ln = LayerNorm(dim=config.encoder_dim)
self.self_attention = MHSAwithQS(config)
if config.attention_residual_dropout_rate is None:
attention_residual_dropout_rate = 0.1
else:
attention_residual_dropout_rate = config.attention_residual_dropout_rate
self.dropout = nn.Dropout(p=attention_residual_dropout_rate)
def forward(self, outputs, paddings):
outputs = self.ln(outputs)
outputs, _ = self.self_attention(
query=outputs,
key=outputs,
value=outputs,
key_padding_mask=paddings==1,
need_weights=False,
)
outputs = self.dropout(outputs)
return outputs
class BatchNorm(nn.Module):
def __init__(self, config: ConformerConfig):
super().__init__()
running_mean = torch.zeros(config.encoder_dim)
running_var = torch.ones(config.encoder_dim)
self.register_buffer('running_mean', running_mean)
self.register_buffer('running_var', running_var)
self.scale = nn.Parameter(torch.zeros(config.encoder_dim))
self.bias = nn.Parameter(torch.zeros(config.encoder_dim))
self.register_buffer('momentum',
torch.FloatTensor([config.batch_norm_momentum]))
self.register_buffer('epsilon',
torch.FloatTensor([config.batch_norm_epsilon]))
self.register_buffer('dim', torch.FloatTensor([config.encoder_dim]))
# self.momentum = config.batch_norm_momentum
# self.epsilon = config.batch_norm_epsilon
# self.dim = config.encoder_dim
def forward(self, inputs, input_paddings):
#inputs: NHD
#padding: NH
mask = 1 - input_paddings[:, :, None]
if self.training:
count = mask.sum()
masked_inp = inputs.masked_fill(mask == 0, 0)
mean = (masked_inp).sum(dim=(0, 1)) / count
var = (torch.square(masked_inp - mean) * mask).sum(dim=(0, 1)) / count
self.running_mean = self.momentum * self.running_mean + (
1 - self.momentum) * mean.detach()
self.running_var = self.momentum * self.running_var + (
1 - self.momentum) * var.detach()
else:
mean = self.running_mean
var = self.running_var
v = (1 + self.scale) * torch.rsqrt(var + self.epsilon)
bn = (inputs - mean) * v + self.bias
output = bn.masked_fill(mask == 0, 0)
return output
class ConvolutionBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.ln = LayerNorm(dim=config.encoder_dim)
self.lin1 = nn.Linear(
in_features=config.encoder_dim, out_features=config.encoder_dim)
self.lin2 = nn.Linear(
in_features=config.encoder_dim, out_features=config.encoder_dim)
self.conv1 = nn.Conv1d(
in_channels=config.encoder_dim,
out_channels=config.encoder_dim,
kernel_size=(config.convolution_kernel_size,),
stride=(1,),
padding='same',
bias=False,
groups=config.encoder_dim)
self.bn = BatchNorm(config)
self.lin3 = nn.Linear(config.encoder_dim, config.encoder_dim)
if config.conv_residual_dropout_rate is None:
conv_residual_dropout_rate = 0.0
else:
conv_residual_dropout_rate = config.conv_residual_dropout_rate
self.dropout = nn.Dropout(p=conv_residual_dropout_rate)
def forward(self, inputs, input_paddings):
inputs = self.ln(inputs)
inputs = F.glu(torch.cat([self.lin1(inputs), self.lin2(inputs)], dim=2))
inputs = inputs * (1 - input_paddings[:, :, None])
inputs = inputs.permute(0, 2, 1)
inputs = self.conv1(inputs)
inputs = inputs.permute(0, 2, 1)
inputs = self.bn(inputs, input_paddings)
inputs = F.silu(inputs)
inputs = self.lin3(inputs)
inputs = self.dropout(inputs)
return inputs
class ConformerBlock(nn.Module):
def __init__(self, config: ConformerConfig):
super().__init__()
self.ff1 = FeedForwardModule(config)
self.mhsa = MultiHeadedSelfAttention(config)
self.conv = ConvolutionBlock(config)
self.ff2 = FeedForwardModule(config)
self.ln = LayerNorm(dim=config.encoder_dim)
def forward(self, inputs, input_paddings):
padding_mask = 1 - input_paddings[:, :, None]
inputs = inputs + 0.5 * self.ff1(inputs, padding_mask)
inputs = inputs + self.mhsa(inputs, input_paddings)
inputs = inputs + self.conv(inputs, input_paddings)
inputs = inputs + 0.5 * self.ff2(inputs, padding_mask)
inputs = self.ln(inputs)
return inputs
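# Illustrative sketch (hypothetical): run a single ConformerBlock on a padded
# batch. The feed-forward modules use LazyLinear, so their weights are only
# materialized on this first forward pass.
def _example_conformer_block():
  config = ConformerConfig(encoder_dim=512, num_attention_heads=8)
  block = ConformerBlock(config)
  x = torch.randn(2, 50, config.encoder_dim)
  paddings = torch.zeros(2, 50)  # 0 = valid frame, 1 = padding
  out = block(x, paddings)       # same shape as x
  assert out.shape == x.shape
  return out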
class ConformerEncoderDecoder(nn.Module):
def __init__(self, config: ConformerConfig):
super().__init__()
self.config = config
preprocessing_config = preprocessor.PreprocessorConfig()
self.preprocessor = preprocessor.MelFilterbankFrontend(
preprocessing_config,
per_bin_mean=preprocessor.LIBRISPEECH_MEAN_VECTOR,
per_bin_stddev=preprocessor.LIBRISPEECH_STD_VECTOR)
self.specaug = SpecAug(
freq_mask_count=config.freq_mask_count,
freq_mask_max_bins=config.freq_mask_max_bins,
time_mask_count=config.time_mask_count,
time_mask_max_frames=config.time_mask_max_frames,
time_mask_max_ratio=config.time_mask_max_ratio,
time_masks_per_frame=config.time_masks_per_frame,
use_dynamic_time_mask_max_frames=config.use_dynamic_time_mask_max_frames
)
if config.input_dropout_rate is None:
input_dropout_rate = 0.1
else:
input_dropout_rate = config.input_dropout_rate
self.subsample = Subsample(
encoder_dim=config.encoder_dim, input_dropout_rate=input_dropout_rate)
self.conformers = nn.ModuleList(
[ConformerBlock(config) for _ in range(config.num_encoder_layers)])
self.ln = LayerNorm(config.encoder_dim)
self.lin = nn.Linear(config.encoder_dim, config.vocab_size)
def forward(self, inputs, input_paddings):
outputs = inputs
output_paddings = input_paddings
outputs, output_paddings = self.preprocessor(outputs, output_paddings)
if self.training and self.config.use_specaug:
outputs, output_paddings = self.specaug(outputs, output_paddings)
outputs, output_paddings = self.subsample(outputs, output_paddings)
for conformer in self.conformers:
outputs = conformer(outputs, output_paddings)
outputs = self.ln(outputs)
outputs = self.lin(outputs)
return outputs, output_paddings
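# Illustrative sketch (hypothetical): the encoder consumes raw waveforms and
# per-sample paddings; LazyLinear layers materialize on the first forward
# pass, after which initialize() can visit every weight (this mirrors the
# dummy forward pass used by the workload's init_model_fn).
def _example_encoder_decoder():
  config = ConformerConfig(num_encoder_layers=1)
  model = ConformerEncoderDecoder(config).eval()
  wave = torch.randn(2, 16000)      # (batch, samples) of raw audio
  pad = torch.zeros_like(wave)      # 0 = valid sample, 1 = padding
  logits, logit_paddings = model(wave, pad)
  initialize(model)                 # Xavier init now that all shapes exist
  return logits.shape, logit_paddings.shape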
|
"""This is a pytorch implementation mirroring:
https://github.com/google/init2winit/blob/master/init2winit/model_lib/librispeech_preprocessor.py.
"""
from dataclasses import dataclass
import math
from typing import Any, Optional, Union
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
# mel spectrum constants.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
LIBRISPEECH_MEAN_VECTOR = [
-7.6047816276550293,
-7.1206226348876953,
-6.8864245414733887,
-6.8705768585205078,
-6.9667720794677734,
-7.1084094047546387,
-6.9528026580810547,
-6.783994197845459,
-6.6195521354675293,
-6.4876265525817871,
-6.4120659828186035,
-6.394047737121582,
-6.4244871139526367,
-6.3993711471557617,
-6.5158271789550781,
-6.7137999534606934,
-6.8476877212524414,
-6.9885001182556152,
-6.9221386909484863,
-7.146148681640625,
-7.2040400505065918,
-7.0537552833557129,
-7.3140382766723633,
-7.1223249435424805,
-7.30251407623291,
-7.1212143898010254,
-7.2425732612609863,
-7.1730537414550781,
-7.0979413986206055,
-7.088747501373291,
-6.9849910736083984,
-6.8787732124328613,
-6.7602753639221191,
-6.6300945281982422,
-6.5145769119262695,
-6.4245057106018066,
-6.356513500213623,
-6.31787633895874,
-6.2660770416259766,
-6.2468328475952148,
-6.2821526527404785,
-6.1908388137817383,
-6.2484354972839355,
-6.1472640037536621,
-6.0924725532531738,
-6.0171003341674805,
-5.9250402450561523,
-5.8535833358764648,
-5.8209109306335449,
-5.8118929862976074,
-5.80783748626709,
-5.7714629173278809,
-5.7453732490539551,
-5.7705655097961426,
-5.7765641212463379,
-5.7831673622131348,
-5.7954087257385254,
-5.7994823455810547,
-5.8023476600646973,
-5.8047118186950684,
-5.8168182373046875,
-5.8844799995422363,
-5.9727106094360352,
-6.0444660186767578,
-6.1284866333007812,
-6.2257585525512695,
-6.3157496452331543,
-6.39061164855957,
-6.4928598403930664,
-6.5498456954956055,
-6.6054320335388184,
-6.6508378982543945,
-6.66917610168457,
-6.6726889610290527,
-6.684234619140625,
-6.6974577903747559,
-6.75471830368042,
-6.7949142456054688,
-6.8634209632873535,
-6.94186544418335
]
LIBRISPEECH_STD_VECTOR = [
3.4353282451629639,
3.5962932109832764,
3.7012472152709961,
3.7369205951690674,
3.7535104751586914,
3.693629264831543,
3.6922497749328613,
3.7641522884368896,
3.8419716358184814,
3.8999848365783691,
3.9294240474700928,
3.9317409992218018,
3.9139585494995117,
3.9031598567962646,
3.8691999912261963,
3.8155081272125244,
3.7644970417022705,
3.7099106311798096,
3.6965086460113525,
3.6003766059875488,
3.5493226051330566,
3.5465121269226074,
3.45003604888916,
3.4712812900543213,
3.4084610939025879,
3.4408135414123535,
3.4104881286621094,
3.4217638969421387,
3.4312851428985596,
3.4199209213256836,
3.4305806159973145,
3.4382665157318115,
3.4580366611480713,
3.4817991256713867,
3.4958710670471191,
3.5036792755126953,
3.5047574043273926,
3.4988734722137451,
3.493056058883667,
3.4822943210601807,
3.459430456161499,
3.4612770080566406,
3.4559063911437988,
3.4755423069000244,
3.4971549510955811,
3.5326557159423828,
3.5705199241638184,
3.5920312404632568,
3.596907377243042,
3.5913500785827637,
3.5865931510925293,
3.5826809406280518,
3.5837743282318115,
3.5895791053771973,
3.5819313526153564,
3.5837869644165039,
3.5861184597015381,
3.5889589786529541,
3.592214822769165,
3.5939455032348633,
3.5856630802154541,
3.5884113311767578,
3.5921022891998291,
3.5870490074157715,
3.5806570053100586,
3.5731067657470703,
3.5617532730102539,
3.54980731010437,
3.5527374744415283,
3.5475366115570068,
3.5387849807739258,
3.5256178379058838,
3.5031836032867432,
3.4922726154327393,
3.4879646301269531,
3.4725594520568848,
3.4558389186859131,
3.4351828098297119,
3.4284293651580811,
3.4299170970916748
]
@dataclass
class PreprocessorConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
sample_rate = 16000
frame_size_ms = 25
frame_step_ms = 10
compute_energy = True
window_fn = 'HANNING'
output_log_floor = 1
pad_end = False
preemph = 0.97
preemph_htk_flavor = True
noise_scale = 0
num_bins = 80
lower_edge_hertz = 125
upper_edge_hertz = 7600
fft_overdrive = False
output_floor = 0.00001
def _hertz_to_mel(frequencies_hertz):
"""Convert hertz to mel."""
log_fn = math.log if type(frequencies_hertz) in [type(0.0), type(0)
] else torch.log
return _MEL_HIGH_FREQUENCY_Q * log_fn(1.0 + (frequencies_hertz /
_MEL_BREAK_FREQUENCY_HERTZ))
def _pad_end_length(num_timesteps, frame_step, frame_size):
"""Returns how many sample needed to be padded for pad_end feature."""
# The number of frames that can be extracted from the signal.
num_frames = int(np.ceil(num_timesteps / frame_step))
# Signal length required for computing `num_frames` frames.
padded_length = frame_step * (num_frames - 1) + frame_size
return padded_length - num_timesteps
def frame(x,
frame_length: int,
frame_step: int,
pad_end: bool = False,
pad_value: Union[int, float] = 0.0):
"""Slides a window and extract values.
  This function extracts `x[:, n:n+frame_length, :]` for `n` sliding with a
  stride of `frame_step`, and returns an array `y` with the shape
`(batch_size, num_frames, frame_length, num_channels)`. Unlike the
counterpart in Tensorflow (`tf.signal.frame`), this function currently
does not take `axis` argument, and the input tensor `x` is expected to
have a shape of `(batch_size, timesteps, channels)`.
Args:
x: An input array with `(batch_size, timesteps, channels)`-shape.
frame_length: The frame length.
frame_step: The frame hop size.
pad_end: If True, the end of signal is padded so the window can continue
sliding while the starting point of the window is in the valid range.
pad_value: A scalar used as a padding value when `pad_end` is True.
Returns:
A tensor with shape `(*, num_frames, frame_length, num_channels)`.
"""
num_timesteps = x.shape[1]
if pad_end:
num_extends = _pad_end_length(num_timesteps, frame_step, frame_length)
x = F.pad(x, (0, 0, 0, num_extends), mode='constant', value=pad_value)
x = x.unfold(dimension=1, size=frame_length, step=frame_step)
return x.permute(0, 1, 3, 2)
def linear_to_mel_weight_matrix(num_mel_bins: int = 20,
num_spectrogram_bins: int = 129,
sample_rate: Union[int, float] = 8000,
lower_edge_hertz: Union[int, float] = 125.0,
upper_edge_hertz: Union[int, float] = 3800.0,
dtype: Any = torch.float32,
device='cpu'):
r"""Pytorch-port of `tf.signal.linear_to_mel_weight_matrix`.
Args:
num_mel_bins: Python int. How many bands in the resulting mel spectrum.
num_spectrogram_bins: An integer `Tensor`. How many bins there are in
the source spectrogram data, which is understood to be `fft_size // 2 + 1`,
i.e. the spectrogram only contains the nonredundant FFT bins.
sample_rate: An integer or float `Tensor`. Samples per second of the
input signal used to create the spectrogram. Used to figure out the
frequencies corresponding to each spectrogram bin, which dictates how they
are mapped into the mel scale.
lower_edge_hertz: Python float. Lower bound on the frequencies to be
included in the mel spectrum. This corresponds to the lower edge of the
lowest triangular band.
upper_edge_hertz: Python float. The desired top edge of the highest
frequency band.
dtype: The `DType` of the result matrix. Must be a floating point type.
Returns:
An array of shape `[num_spectrogram_bins, num_mel_bins]`.
Raises:
ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not
positive, `lower_edge_hertz` is negative, frequency edges are incorrectly
ordered, `upper_edge_hertz` is larger than the Nyquist frequency.
[mel]: https://en.wikipedia.org/wiki/Mel_scale
"""
# Input validator from tensorflow/python/ops/signal/mel_ops.py#L71
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if lower_edge_hertz < 0.0:
raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got %s for sample_rate: %s' %
(upper_edge_hertz, sample_rate))
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = torch.linspace(
0.0, nyquist_hertz, num_spectrogram_bins, dtype=dtype,
device=device)[bands_to_zero:]
spectrogram_bins_mel = _hertz_to_mel(linear_frequencies)[:, None]
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
edges = torch.linspace(
_hertz_to_mel(lower_edge_hertz),
_hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2,
dtype=dtype,
device=device)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel = edges[:-2][None, :]
center_mel = edges[1:-1][None, :]
upper_edge_mel = edges[2:][None, :]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = torch.minimum(lower_slopes, upper_slopes).clamp(
min=0.0, max=None)
# Re-add the zeroed lower bins we sliced out above.
return F.pad(mel_weights_matrix, (0, 0, bands_to_zero, 0))
def _hanning_greco(win_support, frame_size, dtype, device='cpu'):
"""Add a greco-style hanning window to the graph.
Note that the Hanning window in Wikipedia is not the same as the Hanning
window in Greco. The Greco3 Hanning window at 0 is NOT 0, as the wikipedia
page would indicate. Talkin's explanation was that it was like wasting two
samples to have the values at the edge of the window to be 0.0 exactly.
Args:
win_support: Number of samples for non-zero support in the window
frame_size: Total size of the window (frame_size >= win_support)
    dtype: The data type of the window.
Returns:
Tensor of size frame_size with the window to apply.
"""
if frame_size < win_support:
raise ValueError(
'Provided frame_size = {} is lower than win_support = {}'.format(
frame_size, win_support))
arg = torch.pi * 2.0 / (win_support)
hann = 0.5 - (0.5 * torch.cos(
arg * (torch.arange(win_support, dtype=dtype, device=device) + 0.5)))
zero_size = frame_size - win_support
return F.pad(hann, (0, zero_size))
def _next_pow_of_two(x: Union[int, float]) -> int:
return int(2**np.ceil(np.log2(x)))
class SpectrogramFrontend(nn.Module):
"""Layer to convert input audio signals from time domain to frequency domain.
"""
def __init__(self,
config: PreprocessorConfig = None,
input_scale_factor: float = 1.0,
output_log: bool = False,
dtype=torch.float32,
device='cpu'):
super().__init__()
self.config = config
self.input_scale_factor = input_scale_factor
self.output_log = output_log
p = self.config
self._frame_step = int(round(p.sample_rate * p.frame_step_ms / 1000.0))
self._frame_size = int(round(
p.sample_rate * p.frame_size_ms / 1000.0)) + 1 # +1 for the preemph
    # The TF version has a maximum of 512, but it's not always necessary.
self.fft_size = _next_pow_of_two(self._frame_size)
if p.window_fn is None:
self._window_fn = None
elif p.window_fn.upper() == 'HANNING':
def _hanning_window(frame_size, dtype):
# Preparing 1-point longer window to follow TF's definition
if frame_size % 2 == 0:
# simulate periodic=True in tf.signal.hann_window
return torch.hann_window(
window_length=frame_size,
periodic=True,
dtype=dtype,
device=device)
else:
return torch.hann_window(
window_length=frame_size,
periodic=False,
dtype=dtype,
device=device)
self._window_fn = _hanning_window
elif p.window_fn.upper() == 'HANNING_GRECO':
# Greco-compatible hanning window
def f(frame_size, dtype):
return _hanning_greco(
self._frame_size - 1, frame_size, dtype, device=device)
self._window_fn = f
else:
raise ValueError('Illegal value %r for window_fn param' % p.window_fn)
if self._window_fn is None:
self.window = None
else:
window = self._window_fn(self._frame_size - 1, dtype)
window = window.reshape((1, 1, self._frame_size - 1, 1))
self.register_buffer('window', window)
def _apply_preemphasis(self, framed_signal):
p = self.config
if p.preemph_htk_flavor:
return torch.cat([
framed_signal[:, :, :1, :] * (1. - p.preemph),
(framed_signal[:, :, 1:-1, :] -
p.preemph * framed_signal[:, :, :-2, :])
],
dim=2)
else:
return (framed_signal[:, :, 1:, :] -
p.preemph * framed_signal[:, :, :-1, :])
def fprop_paddings(self, input_paddings):
p = self.config
if p.pad_end:
num_extends = _pad_end_length(input_paddings.shape[1],
self._frame_step,
self._frame_size)
input_paddings = F.pad(input_paddings, (0, num_extends), value=1.0)
x = input_paddings.unfold(
dimension=1, size=self._frame_size, step=self._frame_step)
return x.min(dim=2)[0]
def forward(self, inputs, input_paddings):
p = self.config
# Expand to have a channel axis
if inputs.ndim == 2:
inputs = inputs[:, :, None]
output_paddings = None
if input_paddings is not None:
inputs = inputs * (1.0 - input_paddings[:, :, None])
output_paddings = self.fprop_paddings(input_paddings)
else:
output_paddings = None
pcm_audio_chunk = inputs * self.input_scale_factor
framed_signal = frame(
pcm_audio_chunk, self._frame_size, self._frame_step, pad_end=p.pad_end)
if p.preemph != 0.0:
preemphasized = self._apply_preemphasis(framed_signal)
else:
preemphasized = framed_signal[..., :-1, :]
if p.noise_scale > 0.0:
noise_signal = torch.randn_like(preemphasized) * p.noise_scale
else:
noise_signal = torch.zeros_like(preemphasized)
windowed_signal = preemphasized + noise_signal
# Window here
if self.window is not None:
windowed_signal *= self.window
spectrum = torch.fft.rfft(windowed_signal, self.fft_size, dim=2)
spectrum = torch.abs(spectrum)
if p.compute_energy:
spectrum = spectrum**2.0
outputs = spectrum
if self.output_log:
      # torch.maximum requires a tensor operand, so clamp against the scalar
      # floor instead (mirrors jnp.maximum in the JAX version).
      outputs = torch.log(outputs.clamp(min=p.output_log_floor))
return outputs, output_paddings
class MelFilterbankFrontend(nn.Module):
"""Layer to compute log mel spectograms from input audio signals."""
def __init__(self,
config: PreprocessorConfig = None,
use_divide_stream: bool = True,
per_bin_mean: Optional[float] = None,
per_bin_stddev: Optional[float] = None,
device='cpu'):
super().__init__()
self.config = config
self.use_divide_stream = use_divide_stream
self.per_bin_mean = per_bin_mean
self.per_bin_stddev = per_bin_stddev
p = self.config
input_scale_factor = 2**-15 if self.use_divide_stream else 1.0
self.stft = SpectrogramFrontend(
p, input_scale_factor=input_scale_factor, output_log=False)
if self.per_bin_mean is None:
per_bin_mean = [0.0] * p.num_bins
else:
per_bin_mean = self.per_bin_mean
if self.per_bin_stddev is None:
per_bin_stddev = [1.0] * p.num_bins
else:
per_bin_stddev = self.per_bin_stddev
self.register_buffer('_normalizer_mean',
torch.FloatTensor(per_bin_mean)[None, None, :, None])
self.register_buffer('_normalizer_stddev',
torch.FloatTensor(per_bin_stddev)[None, None, :, None])
def forward(self, inputs, input_paddings):
p = self.config
spect, spect_paddings = self.stft(inputs, input_paddings)
mel_weights = linear_to_mel_weight_matrix(
num_mel_bins=p.num_bins,
num_spectrogram_bins=spect.shape[2],
sample_rate=p.sample_rate,
lower_edge_hertz=p.lower_edge_hertz,
upper_edge_hertz=p.upper_edge_hertz,
device=spect.device)
mel_spectrogram = torch.einsum('fn,btfc->btnc', mel_weights, spect)
logmel_spectrogram = torch.log(
mel_spectrogram.clamp(min=p.output_floor, max=None))
normalized_logmel_spectrogram = (
(logmel_spectrogram - self._normalizer_mean) / self._normalizer_stddev)
normalized_logmel_spectrogram = torch.squeeze(normalized_logmel_spectrogram,
-1)
return normalized_logmel_spectrogram, spect_paddings
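# Illustrative sketch (hypothetical, not part of the original module): compute
# normalized log-mel features with the PyTorch frontend using the LibriSpeech
# per-bin statistics defined above.
def _example_torch_frontend_usage():
  frontend = MelFilterbankFrontend(
      PreprocessorConfig(),
      per_bin_mean=LIBRISPEECH_MEAN_VECTOR,
      per_bin_stddev=LIBRISPEECH_STD_VECTOR)
  audio = torch.zeros(2, 16000)      # (batch, samples), one second at 16 kHz
  paddings = torch.zeros(2, 16000)   # 0.0 = valid sample, 1.0 = padding
  features, feature_paddings = frontend(audio, paddings)
  # features has shape (batch, num_frames, num_bins).
  return features, feature_paddings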
|
"""Conformer workload implemented in PyTorch."""
import contextlib
import functools
import math
import random
from typing import Dict, Iterator, Optional, Tuple
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
import algorithmic_efficiency.random_utils as prng
from algorithmic_efficiency.workloads.librispeech_conformer import metrics
from algorithmic_efficiency.workloads.librispeech_conformer import workload
from algorithmic_efficiency.workloads.librispeech_conformer.input_pipeline import \
LibriSpeechDataset
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch import \
models as conformer_model
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
MAX_INPUT_LENGTH = 320000
class LibriSpeechConformerWorkload(workload.BaseLibrispeechWorkload):
def __init__(self,
tokenizer_vocab_path: Optional[str] = None,
use_specaug: bool = True) -> None:
super().__init__()
self.tokenizer = metrics.load_tokenizer(tokenizer_vocab_path)
self.use_specaug = use_specaug
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Conformer model init function.
Here we use dropout_rate as residual_dropout_rate, and aux_dropout_rate as
input_dropout_rate.
"""
torch.random.manual_seed(rng[0])
# Disable cudnn benchmark to avoid OOM errors.
torch.backends.cudnn.benchmark = False
model = conformer_model.ConformerEncoderDecoder(
conformer_model.ConformerConfig(
attention_residual_dropout_rate=dropout_rate,
feed_forward_residual_dropout_rate=dropout_rate,
conv_residual_dropout_rate=dropout_rate,
input_dropout_rate=aux_dropout_rate,
use_specaug=self.use_specaug))
self.ctc_loss = torch.nn.CTCLoss(blank=0, reduction='none')
# Run model once to initialize lazy layers.
# Run the initialization in eval mode to disable BN tracking.
model = model.eval()
t = MAX_INPUT_LENGTH
wave = torch.randn((2, t))
pad = torch.zeros_like(wave)
_ = model(wave, pad)
conformer_model.initialize(model)
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
self.requires_sync_before_eval = False
if N_GPUS > 1:
if USE_PYTORCH_DDP:
self.requires_sync_before_eval = True
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['lin.weight', 'lin.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
model = params
if mode == spec.ForwardPassMode.EVAL:
model.eval()
if mode == spec.ForwardPassMode.TRAIN:
model.train()
model.apply(
functools.partial(
pytorch_utils.update_batch_norm_fn,
update_batch_norm=update_batch_norm))
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
inputs, input_paddings = augmented_and_preprocessed_input_batch['inputs']
logits, logits_paddings = model(inputs.to(DEVICE),
input_paddings.to(DEVICE))
return (logits, logits_paddings), None
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del cache
del repeat_final_dataset
del num_batches
is_train = split == 'train'
if split == 'train':
ds_split = 'train-clean-100+train-clean-360+train-other-500'
elif split == 'eval_train':
ds_split = 'train-clean-100+train-clean-360+train-other-500'
elif split == 'validation':
ds_split = 'dev-clean+dev-other'
else:
ds_split = 'test-clean'
ds = LibriSpeechDataset(split=ds_split, data_dir=data_dir)
if split == 'eval_train':
indices = list(range(len(ds)))
random.Random(data_rng[0]).shuffle(indices)
ds = torch.utils.data.Subset(ds, indices[:self.num_eval_train_examples])
sampler = None
if USE_PYTORCH_DDP:
per_device_batch_size = global_batch_size // N_GPUS
ds_iter_batch_size = per_device_batch_size
else:
ds_iter_batch_size = global_batch_size
if USE_PYTORCH_DDP:
if is_train:
sampler = torch.utils.data.distributed.DistributedSampler(
ds, num_replicas=N_GPUS, rank=RANK, shuffle=True)
else:
sampler = data_utils.DistributedEvalSampler(
ds, num_replicas=N_GPUS, rank=RANK, shuffle=False)
dataloader = torch.utils.data.DataLoader(
ds,
batch_size=ds_iter_batch_size,
shuffle=not USE_PYTORCH_DDP and is_train,
sampler=sampler,
num_workers=4,
pin_memory=True,
drop_last=is_train)
dataloader = data_utils.cycle(
dataloader, custom_sampler=USE_PYTORCH_DDP, use_mixup=False)
return dataloader
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: Tuple[spec.Tensor, spec.Tensor], # (label_batch, padding)
logits_batch: Tuple[spec.Tensor, spec.Tensor], # (logits_batch, padding)
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
del label_smoothing
targets, target_paddings = label_batch
logits, logit_paddings = logits_batch
logprobs = torch.log_softmax(logits, dim=-1)
input_lengths = torch.einsum('bh->b', 1 - logit_paddings).long()
target_lengths = torch.einsum('bh->b', 1 - target_paddings).long()
per_example_losses = self.ctc_loss(
logprobs.permute(1, 0, 2),
targets.long(),
input_lengths,
target_lengths)
# mask_batch is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
mask_batch = torch.logical_and(mask_batch, target_lengths)
else:
mask_batch = target_lengths
n_valid_examples = mask_batch.sum().to(per_example_losses)
summed_loss = per_example_losses.sum()
n_valid_examples = max(n_valid_examples, 1)
return {
'summed': summed_loss,
'n_valid_examples': torch.as_tensor(n_valid_examples, device=DEVICE),
'per_example': per_example_losses,
}
def greedy_decode(
self, logits: spec.Tensor,
logit_paddings: spec.Tensor) -> Tuple[spec.Tensor, spec.Tensor]:
framewise_tokens = logits.max(dim=-1)[1]
framewise_tokens = framewise_tokens * (1 - logit_paddings)
# Add a sentinel column because unique_consecutive flattens the array
# and then computes the unique values.
framewise_tokens = torch.cat(
[framewise_tokens, -torch.ones_like(framewise_tokens[:, 0:1])], dim=1)
_, indices = torch.unique_consecutive(framewise_tokens, return_inverse=True)
indices -= indices.min(dim=1, keepdims=True)[0]
result = torch.zeros_like(framewise_tokens)
result = result.scatter_(1, indices, framewise_tokens)
# Replace the sentinel column with 0s and remove it.
result[result == -1] = 0
result = result[:, :-1]
# Remove blanks (id = 0).
blank_id = 0
fin_result = torch.zeros_like(result)
idxs = torch.arange(
fin_result.numel(), device=result.device).view(*fin_result.shape)
mask = torch.arange(
fin_result.shape[1], device=result.device).view(
1, -1) < result.count_nonzero(dim=1).view(-1, 1)
fin_result.view(-1)[idxs[mask != 0]] = result[result != blank_id]
padding = fin_result == 0
return fin_result, padding
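# Worked example for greedy_decode above (illustrative only): with blank_id = 0,
# a framewise argmax row [1, 1, 0, 2, 2, 2, 0, 3] first collapses consecutive
# repeats to [1, 0, 2, 0, 3], then drops the blanks and left-packs the tokens,
# yielding [1, 2, 3, 0, 0, ...] with `padding` marking the trailing zeros.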
def sync_sd(self, params: spec.ParameterContainer) -> None:
sd = params.state_dict()
dist.barrier()
for k in sd:
dist.all_reduce(sd[k], op=dist.ReduceOp.SUM)
# Assumes N_GPUS is the world size.
sd[k] = sd[k] / N_GPUS
params.load_state_dict(sd)
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del global_step
data_rng, model_rng = prng.split(rng, 2)
if split not in self._eval_iters:
# These iterators repeat indefinitely.
self._eval_iters[split] = (
self._build_input_queue(
data_rng, split, data_dir, global_batch_size=global_batch_size))
total_metrics = {
'loss': torch.tensor(0., device=DEVICE),
'lengths': torch.tensor(0., device=DEVICE),
'word_errors': torch.tensor(0., device=DEVICE),
'num_words': torch.tensor(0., device=DEVICE),
}
num_batches = int(math.ceil(num_examples / global_batch_size))
if self.requires_sync_before_eval:
self.sync_sd(params)
for _ in range(num_batches):
batch = next(self._eval_iters[split])
(logits, logits_padding), _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
model_rng,
update_batch_norm=False)
decoded, decoded_paddings = self.greedy_decode(logits, logits_padding)
targets, target_paddings = batch['targets']
word_errors, num_words = metrics.compute_wer(
decoded=decoded.cpu().numpy(),
decoded_paddings=decoded_paddings.cpu().numpy(),
targets=targets.cpu().numpy(),
target_paddings=target_paddings.cpu().numpy(),
tokenizer=self.tokenizer)
loss = self.loss_fn((targets, target_paddings), (logits, logits_padding))
summed_loss = loss['summed']
lengths = loss['n_valid_examples']
batch_metrics = {
'loss': summed_loss,
'lengths': lengths,
'word_errors': word_errors,
'num_words': num_words,
}
total_metrics = {
k: v + batch_metrics[k] for k, v in total_metrics.items()
}
if USE_PYTORCH_DDP:
for metric in total_metrics.values():
dist.all_reduce(metric)
return {
'ctc_loss':
float(total_metrics['loss'].item() /
total_metrics['lengths'].item()),
'wer':
float(total_metrics['word_errors'].item() /
total_metrics['num_words'].item()),
}
|
# Forked from Flax example which can be found here:
# https://github.com/google/flax/blob/main/examples/ogbg_molpcba/train.py
from typing import Any
from clu import metrics
import flax
import jax
import jax.numpy as jnp
import numpy as np
from sklearn.metrics import average_precision_score
import torch
import torch.distributed as dist
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
def predictions_match_labels(*,
logits: jnp.ndarray,
labels: jnp.ndarray,
**kwargs) -> jnp.ndarray:
"""Returns a binary array indicating where predictions match the labels."""
del kwargs # Unused.
preds = logits > 0
return (preds == labels).astype(jnp.float32)
@flax.struct.dataclass
class MeanAveragePrecision(
metrics.CollectingMetric.from_outputs(('logits', 'labels', 'mask'))):
"""Computes the mean average precision (mAP) over different tasks."""
def compute(self):
# Matches the official OGB evaluation scheme for mean average precision.
values = super().compute()
labels = values['labels']
logits = values['logits']
mask = values['mask']
if USE_PYTORCH_DDP:
# Sync labels, logits, and masks across devices.
all_values = [labels, logits, mask]
for idx, array in enumerate(all_values):
tensor = torch.as_tensor(array, device=DEVICE)
# Assumes that the tensors on all devices have the same shape.
all_tensors = [torch.zeros_like(tensor) for _ in range(N_GPUS)]
dist.all_gather(all_tensors, tensor)
all_values[idx] = torch.cat(all_tensors).cpu().numpy()
labels, logits, mask = all_values
mask = mask.astype(bool)
probs = jax.nn.sigmoid(logits)
num_tasks = labels.shape[1]
average_precisions = np.full(num_tasks, np.nan)
# Note that this code is slow (~1 minute).
for task in range(num_tasks):
# AP is only defined when there is at least one negative data
# and at least one positive data.
if np.sum(labels[:, task] == 0) > 0 and np.sum(labels[:, task] == 1) > 0:
is_labeled = mask[:, task]
average_precisions[task] = average_precision_score(
labels[is_labeled, task], probs[is_labeled, task])
# When all APs are NaNs, return NaN. This avoids raising a RuntimeWarning.
if np.isnan(average_precisions).all():
return np.nan
return np.nanmean(average_precisions)
class AverageDDP(metrics.Average):
"""Supports syncing metrics for PyTorch distributed data parallel (DDP)."""
def compute(self) -> Any:
if USE_PYTORCH_DDP:
# Sync counts across devices.
total_tensor = torch.tensor(np.asarray(self.total), device=DEVICE)
count_tensor = torch.tensor(np.asarray(self.count), device=DEVICE)
dist.all_reduce(total_tensor)
dist.all_reduce(count_tensor)
# Hacky way to avoid FrozenInstanceError
# (https://docs.python.org/3/library/dataclasses.html#frozen-instances).
object.__setattr__(self, 'total', total_tensor.cpu().numpy())
object.__setattr__(self, 'count', count_tensor.cpu().numpy())
return super().compute()
@flax.struct.dataclass
class EvalMetrics(metrics.Collection):
accuracy: AverageDDP.from_fun(predictions_match_labels)
loss: AverageDDP.from_output('loss')
mean_average_precision: MeanAveragePrecision
|
# Forked from Flax example which can be found here:
# https://github.com/google/flax/blob/main/examples/ogbg_molpcba/input_pipeline.py
# and from the init2winit fork here
# https://github.com/google/init2winit/blob/master/init2winit/dataset_lib/ogbg_molpcba.py
"""Exposes the ogbg-molpcba dataset in a convenient format."""
import jax
import jraph
import numpy as np
import tensorflow_datasets as tfds
import torch
AVG_NODES_PER_GRAPH = 26
AVG_EDGES_PER_GRAPH = 56
TFDS_SPLIT_NAME = {
'train': 'train',
'eval_train': 'train',
'validation': 'validation',
'test': 'test',
}
def _load_dataset(split, should_shuffle, data_rng, data_dir):
"""Loads a dataset split from TFDS."""
if should_shuffle:
file_data_rng, dataset_data_rng = jax.random.split(data_rng)
file_data_rng = file_data_rng[0]
dataset_data_rng = dataset_data_rng[0]
else:
file_data_rng = None
dataset_data_rng = None
read_config = tfds.ReadConfig(add_tfds_id=True, shuffle_seed=file_data_rng)
dataset = tfds.load(
'ogbg_molpcba:0.1.3',
split=TFDS_SPLIT_NAME[split],
shuffle_files=should_shuffle,
read_config=read_config,
data_dir=data_dir)
if should_shuffle:
dataset = dataset.shuffle(seed=dataset_data_rng, buffer_size=2**15)
dataset = dataset.repeat()
# We do not need to worry about repeating the dataset for evaluations because
# we call itertools.cycle on the eval iterator, which stores the iterator in
# memory so that it can be repeated.
return dataset
def _to_jraph(example):
"""Converts an example graph to jraph.GraphsTuple."""
example = jax.tree_map(lambda x: x._numpy(), example) # pylint: disable=protected-access
edge_feat = example['edge_feat']
node_feat = example['node_feat']
edge_index = example['edge_index']
labels = example['labels']
num_nodes = example['num_nodes']
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
return jraph.GraphsTuple(
n_node=num_nodes,
n_edge=np.array([len(edge_index) * 2]),
nodes=node_feat,
edges=np.concatenate([edge_feat, edge_feat]),
# Make the edges bidirectional
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
# Keep the labels with the graph for batching. They will be removed
# in the processed batch.
globals=np.expand_dims(labels, axis=0))
def _get_weights_by_nan_and_padding(labels, padding_mask):
"""Handles NaNs and padding in labels.
Sets all the weights from examples coming from padding to 0. Changes all NaNs
in labels to 0s and sets the corresponding per-label weight to 0.
Args:
labels: Labels including labels from padded examples
padding_mask: Binary array of which examples are padding
Returns:
tuple of (processed labels, corresponding weights)
"""
nan_mask = np.isnan(labels)
replaced_labels = np.copy(labels)
np.place(replaced_labels, nan_mask, 0)
weights = 1.0 - nan_mask
# Weights for all labels of a padded element will be 0
weights = weights * padding_mask[:, None]
return replaced_labels, weights
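# Small worked example (illustrative only): for labels [[1., nan], [0., 1.]] and
# padding_mask [1, 0], the NaN is replaced by 0. and the returned weights are
# [[1., 0.], [0., 0.]]: the NaN entry and every label of the padded second
# example get zero weight.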
def _get_batch_iterator(dataset_iter, global_batch_size, num_shards=None):
"""Turns a per-example iterator into a batched iterator.
Constructs the batch from num_shards smaller batches, so that we can easily
shard the batch to multiple devices during training. We use
dynamic batching, so we specify some max number of graphs/nodes/edges, add
as many graphs as we can, and then pad to the max values.
Args:
dataset_iter: The TFDS dataset iterator.
global_batch_size: How many average-sized graphs go into the batch.
num_shards: How many devices we should be able to shard the batch into.
Yields:
Batch in the init2winit format. Each field is a list of num_shards separate
smaller batches.
"""
if not num_shards:
num_shards = max(torch.cuda.device_count(), jax.local_device_count())
# We will construct num_shards smaller batches and then put them together.
per_device_batch_size = global_batch_size // num_shards
max_n_nodes = AVG_NODES_PER_GRAPH * per_device_batch_size
max_n_edges = AVG_EDGES_PER_GRAPH * per_device_batch_size
max_n_graphs = per_device_batch_size
jraph_iter = map(_to_jraph, dataset_iter)
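# Note: jraph.dynamically_batch pads every batch with one extra graph (and at
# least one extra node), which is presumably why the node and graph budgets are
# passed below as max_n_nodes + 1 and max_n_graphs + 1.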
batched_iter = jraph.dynamically_batch(jraph_iter,
max_n_nodes + 1,
max_n_edges,
max_n_graphs + 1)
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
for batched_graph in batched_iter:
count += 1
# Separate the labels from the graph
labels = batched_graph.globals
graph = batched_graph._replace(globals={})
replaced_labels, weights = _get_weights_by_nan_and_padding(
labels, jraph.get_graph_padding_mask(graph))
graphs_shards.append(graph)
labels_shards.append(replaced_labels)
weights_shards.append(weights)
if count == num_shards:
def f(x):
return jax.tree_map(lambda *vals: np.stack(vals, axis=0), x[0], *x[1:])
graphs_shards = f(graphs_shards)
labels_shards = f(labels_shards)
weights_shards = f(weights_shards)
yield {
'inputs': graphs_shards,
'targets': labels_shards,
'weights': weights_shards,
}
count = 0
graphs_shards = []
labels_shards = []
weights_shards = []
def get_dataset_iter(split, data_rng, data_dir, global_batch_size):
shuffle = split in ['train', 'eval_train']
ds = _load_dataset(
split, should_shuffle=shuffle, data_rng=data_rng, data_dir=data_dir)
return _get_batch_iterator(iter(ds), global_batch_size)
|
"""OGBG workload parent class."""
import abc
import itertools
import math
from typing import Any, Dict, Optional
import jax
from algorithmic_efficiency import random_utils as prng
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.ogbg import input_pipeline
from algorithmic_efficiency.workloads.ogbg import metrics
class BaseOgbgWorkload(spec.Workload):
_num_outputs: int = 128
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'mean_average_precision'
def has_reached_validation_target(self, eval_result: float) -> bool:
return eval_result[
'validation/mean_average_precision'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 0.28098
def has_reached_test_target(self, eval_result: float) -> bool:
return eval_result['test/mean_average_precision'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 0.268729
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
return 350343
@property
def num_eval_train_examples(self) -> int:
return 43793
@property
def num_validation_examples(self) -> int:
return 43793
@property
def num_test_examples(self) -> int:
return 43793
@property
def eval_batch_size(self) -> int:
return 32768
@property
def train_mean(self):
raise NotImplementedError
@property
def train_stddev(self):
raise NotImplementedError
@property
def max_allowed_runtime_sec(self) -> int:
return 18_477 # ~5 hours
@property
def eval_period_time_sec(self) -> int:
return 4 * 60
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int):
dataset_iter = input_pipeline.get_dataset_iter(split,
data_rng,
data_dir,
global_batch_size)
if split != 'train':
# Note that this stores the entire val dataset in memory.
dataset_iter = itertools.cycle(dataset_iter)
return dataset_iter
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
per_example_losses = self._binary_cross_entropy_with_mask(
labels=label_batch,
logits=logits_batch,
mask=mask_batch,
label_smoothing=label_smoothing)
if mask_batch is not None:
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 80_000
@abc.abstractmethod
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
def _eval_batch(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState) -> metrics.EvalMetrics:
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
return self._eval_metric(batch['targets'], logits, batch['weights'])
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del global_step
data_rng, model_rng = prng.split(rng, 2)
if split not in self._eval_iters:
self._eval_iters[split] = self._build_input_queue(
data_rng, split, data_dir, global_batch_size=global_batch_size)
total_metrics = None
num_eval_steps = int(math.ceil(float(num_examples) / global_batch_size))
# Loop over graph batches in eval dataset.
for _ in range(num_eval_steps):
batch = next(self._eval_iters[split])
batch_metrics = self._eval_batch(params, batch, model_state, model_rng)
total_metrics = (
batch_metrics
if total_metrics is None else total_metrics.merge(batch_metrics))
if total_metrics is None:
return {}
return self._normalize_eval_metrics(num_examples, total_metrics)
|
# Ported to PyTorch from
# https://github.com/google/init2winit/blob/master/init2winit/model_lib/gnn.py.
from typing import Callable, Optional, Tuple
import jax.tree_util as tree
from jraph import GraphsTuple
import torch
from torch import nn
from algorithmic_efficiency import init_utils
def _make_mlp(in_dim, hidden_dims, dropout_rate):
"""Creates a MLP with specified dimensions."""
layers = nn.Sequential()
for dim in hidden_dims:
layers.add_module('dense', nn.Linear(in_features=in_dim, out_features=dim))
layers.add_module('norm', nn.LayerNorm(dim, eps=1e-6))
layers.add_module('relu', nn.ReLU())
layers.add_module('dropout', nn.Dropout(dropout_rate))
return layers
class GNN(nn.Module):
"""Defines a graph network.
The model assumes the input data is a jraph.GraphsTuple without global
variables. The final prediction will be encoded in the globals.
"""
latent_dim: int = 256
hidden_dims: Tuple[int] = (256,)
num_message_passing_steps: int = 5
def __init__(self,
num_outputs: int = 128,
dropout_rate: Optional[float] = 0.1) -> None:
super().__init__()
self.num_outputs = num_outputs
if dropout_rate is None:
dropout_rate = 0.1
# in_features are specifically chosen for the ogbg workload.
self.node_embedder = nn.Linear(in_features=9, out_features=self.latent_dim)
self.edge_embedder = nn.Linear(in_features=3, out_features=self.latent_dim)
graph_network_layers = []
for st in range(self.num_message_passing_steps):
# Constants in in_dims are based on the requirements of the GraphNetwork.
if st == 0:
in_dim = self.latent_dim * 3 + self.num_outputs
last_in_dim = self.latent_dim * 2 + self.num_outputs
else:
in_dim = self.hidden_dims[-1] * 4
last_in_dim = self.hidden_dims[-1] * 3
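# More concretely (descriptive note): the edge/node update MLPs each consume a
# concatenation of four feature blocks (own features, sender-side features,
# receiver-side features, globals) and the global update MLP consumes three
# (pooled nodes, pooled edges, globals), so in_dim and last_in_dim above follow
# from the embedding sizes at step 0 and from hidden_dims[-1] afterwards.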
graph_network_layers.append(
GraphNetwork(
update_edge_fn=_make_mlp(in_dim, self.hidden_dims, dropout_rate),
update_node_fn=_make_mlp(in_dim, self.hidden_dims, dropout_rate),
update_global_fn=_make_mlp(last_in_dim,
self.hidden_dims,
dropout_rate)))
self.graph_network = nn.Sequential(*graph_network_layers)
self.decoder = nn.Linear(
in_features=self.hidden_dims[-1], out_features=self.num_outputs)
for m in self.modules():
if isinstance(m, nn.Linear):
init_utils.pytorch_default_init(m)
def forward(self, graph: GraphsTuple) -> torch.Tensor:
graph = graph._replace(
globals=torch.zeros([graph.n_node.shape[0], self.num_outputs],
device=graph.n_node.device))
graph = graph._replace(nodes=self.node_embedder(graph.nodes))
graph = graph._replace(edges=self.edge_embedder(graph.edges))
graph = self.graph_network(graph)
# Map globals to represent the final result
graph = graph._replace(globals=self.decoder(graph.globals))
return graph.globals
class GraphNetwork(nn.Module):
"""Returns a method that applies a configured GraphNetwork.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Example usage::
gn = GraphNetwork(update_edge_function,
update_node_function, **kwargs)
# Conduct multiple rounds of message passing with the same parameters:
for _ in range(num_message_passing_steps):
graph = gn(graph)
Args:
update_edge_fn: function used to update the edges or None to deactivate edge
updates.
update_node_fn: function used to update the nodes or None to deactivate node
updates.
update_global_fn: function used to update the globals or None to deactivate
globals updates.
Returns:
A method that applies the configured GraphNetwork.
"""
def __init__(self,
update_edge_fn: Optional[Callable] = None,
update_node_fn: Optional[Callable] = None,
update_global_fn: Optional[Callable] = None) -> None:
super().__init__()
self.update_edge_fn = update_edge_fn
self.update_node_fn = update_node_fn
self.update_global_fn = update_global_fn
def forward(self, graph: GraphsTuple) -> GraphsTuple:
"""Applies a configured GraphNetwork to a graph.
This implementation follows Algorithm 1 in https://arxiv.org/abs/1806.01261
There is one difference. For the nodes update the class aggregates over the
sender edges and receiver edges separately. This is a bit more general
than the algorithm described in the paper. The original behaviour can be
recovered by using only the receiver edge aggregations for the update.
In addition this implementation supports softmax attention over incoming
edge features.
Many popular Graph Neural Networks can be implemented as special cases of
GraphNets, for more information please see the paper.
Args:
graph: a `GraphsTuple` containing the graph.
Returns:
Updated `GraphsTuple`.
"""
nodes, edges, receivers, senders, globals_, n_node, n_edge = graph
sum_n_node = tree.tree_leaves(nodes)[0].shape[0]
if not tree.tree_all(
tree.tree_map(lambda n: n.shape[0] == sum_n_node, nodes)):
raise ValueError(
'All node arrays in nest must contain the same number of nodes.')
sent_attributes = tree.tree_map(lambda n: n[senders], nodes)
received_attributes = tree.tree_map(lambda n: n[receivers], nodes)
# Here we scatter the global features to the corresponding edges,
# giving us tensors of shape [num_edges, global_feat].
global_edge_attributes = tree.tree_map(
lambda g: torch.repeat_interleave(g, n_edge, dim=0), globals_)
if self.update_edge_fn:
edge_fn_inputs = torch.cat(
[edges, sent_attributes, received_attributes, global_edge_attributes],
dim=-1)
edges = self.update_edge_fn(edge_fn_inputs)
if self.update_node_fn:
sent_attributes = tree.tree_map(
lambda e: scatter_sum(e, senders, dim=0, dim_size=sum_n_node), edges)
received_attributes = tree.tree_map(
lambda e: scatter_sum(e, receivers, dim=0, dim_size=sum_n_node),
edges)
# Here we scatter the global features to the corresponding nodes,
# giving us tensors of shape [num_nodes, global_feat].
global_attributes = tree.tree_map(
lambda g: torch.repeat_interleave(g, n_node, dim=0), globals_)
node_fn_inputs = torch.cat(
[nodes, sent_attributes, received_attributes, global_attributes],
dim=-1)
nodes = self.update_node_fn(node_fn_inputs)
if self.update_global_fn:
n_graph = n_node.shape[0]
graph_idx = torch.arange(n_graph, device=graph.n_node.device)
# To aggregate nodes and edges from each graph to global features,
# we first construct tensors that map the node to the corresponding graph.
# For example, if you have `n_node=[1,2]`, we construct the tensor
# [0, 1, 1]. We then do the same for edges.
node_gr_idx = torch.repeat_interleave(graph_idx, n_node, dim=0)
edge_gr_idx = torch.repeat_interleave(graph_idx, n_edge, dim=0)
# We use the aggregation function to pool the nodes/edges per graph.
node_attributes = tree.tree_map(
lambda n: scatter_sum(n, node_gr_idx, dim=0, dim_size=n_graph), nodes)
edge_attributes = tree.tree_map(
lambda e: scatter_sum(e, edge_gr_idx, dim=0, dim_size=n_graph), edges)
# These pooled nodes are the inputs to the global update fn.
global_fn_inputs = torch.cat([node_attributes, edge_attributes, globals_],
dim=-1)
globals_ = self.update_global_fn(global_fn_inputs)
return GraphsTuple(
nodes=nodes,
edges=edges,
receivers=receivers,
senders=senders,
globals=globals_,
n_node=n_node,
n_edge=n_edge)
# Forked from
# github.com/rusty1s/pytorch_scatter/blob/master/torch_scatter/scatter.py.
def scatter_sum(src: torch.Tensor,
index: torch.Tensor,
dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None) -> torch.Tensor:
r"""
|
.. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
master/docs/source/_figures/add.svg?sanitize=true
:align: center
:width: 400px
|
Reduces all values from the :attr:`src` tensor into :attr:`out` at the
indices specified in the :attr:`index` tensor along a given axis
:attr:`dim`.
For each value in :attr:`src`, its output index is specified by its index
in :attr:`src` for dimensions outside of :attr:`dim` and by the
corresponding value in :attr:`index` for dimension :attr:`dim`.
The applied reduction is here defined as a sum.
Formally, if :attr:`src` and :attr:`index` are :math:`n`-dimensional
tensors with size :math:`(x_0, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`
and :attr:`dim` = `i`, then :attr:`out` must be an :math:`n`-dimensional
tensor with size :math:`(x_0, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})`.
Moreover, the values of :attr:`index` must be between :math:`0` and
:math:`y - 1`, although no specific ordering of indices is required.
The :attr:`index` tensor supports broadcasting in case its dimensions do
not match with :attr:`src`.
For one-dimensional tensors, the operation computes
.. math::
\mathrm{out}_i = \mathrm{out}_i + \sum_j~\mathrm{src}_j
where :math:`\sum_j` is over :math:`j` such that
:math:`\mathrm{index}_j = i`.
.. note::
This operation is implemented via atomic operations on the GPU and is
therefore **non-deterministic** since the order of parallel operations
to the same value is undetermined.
For floating-point variables, this results in a source of variance in
the result.
:param src: The source tensor.
:param index: The indices of elements to scatter.
:param dim: The axis along which to index. (default: :obj:`-1`)
:param out: The destination tensor.
:param dim_size: If :attr:`out` is not given, automatically create output
with size :attr:`dim_size` at dimension :attr:`dim`.
If :attr:`dim_size` is not given, a minimal sized output tensor
according to :obj:`index.max() + 1` is returned.
:rtype: :class:`Tensor`
.. code-block:: python
src = torch.randn(10, 6, 64)
index = torch.tensor([0, 1, 0, 1, 2, 1])
# Broadcasting in the first and last dim.
out = scatter_sum(src, index, dim=1)
print(out.size())
.. code-block::
torch.Size([10, 3, 64])
"""
index = broadcast(index, src, dim)
if out is None:
size = list(src.size())
if dim_size is not None:
size[dim] = dim_size
elif index.numel() == 0:
size[dim] = 0
else:
size[dim] = int(index.max()) + 1
out = torch.zeros(size, dtype=src.dtype, device=src.device)
return out.scatter_add_(dim, index, src)
else:
return out.scatter_add_(dim, index, src)
# Forked from
# github.com/rusty1s/pytorch_scatter/blob/master/torch_scatter/utils.py.
def broadcast(src: torch.Tensor, other: torch.Tensor, dim: int):
if dim < 0:
dim = other.dim() + dim
if src.dim() == 1:
for _ in range(0, dim):
src = src.unsqueeze(0)
for _ in range(src.dim(), other.dim()):
src = src.unsqueeze(-1)
src = src.expand(other.size())
return src
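# Illustrative example (not part of the original code): in the scatter_sum calls
# above, an index such as `senders` with shape [num_edges] is broadcast against
# a [num_edges, feature_dim] tensor with dim=0, i.e. unsqueezed to
# [num_edges, 1] and expanded to [num_edges, feature_dim], so scatter_add_
# applies the same per-edge index to every feature column.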
|
"""OGBG workload implemented in PyTorch."""
import contextlib
from typing import Any, Callable, Dict, Optional, Tuple
import jax
from jraph import GraphsTuple
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.ogbg import metrics
from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.models import GNN
from algorithmic_efficiency.workloads.ogbg.workload import BaseOgbgWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
def _pytorch_map(inputs: Any) -> Any:
if USE_PYTORCH_DDP:
return jax.tree_map(lambda a: torch.as_tensor(a, device=DEVICE), inputs)
return jax.tree_map(
lambda a: torch.as_tensor(a, device=DEVICE).view(-1, a.shape[-1])
if len(a.shape) == 3 else torch.as_tensor(a, device=DEVICE).view(-1),
inputs)
def _shard(inputs: Any) -> Any:
if not USE_PYTORCH_DDP:
return inputs
return jax.tree_map(lambda tensor: tensor[RANK], inputs)
def _graph_map(function: Callable, graph: GraphsTuple) -> GraphsTuple:
return GraphsTuple(
nodes=function(graph.nodes),
edges=function(graph.edges),
receivers=function(graph.receivers),
senders=function(graph.senders),
globals=function(graph.globals),
n_node=function(graph.n_node),
n_edge=function(graph.n_edge))
class OgbgWorkload(BaseOgbgWorkload):
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
loss_dict = super().loss_fn(label_batch,
logits_batch,
mask_batch,
label_smoothing)
loss_dict['n_valid_examples'] = torch.as_tensor(
loss_dict['n_valid_examples'], device=DEVICE)
return loss_dict
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int):
# TODO: Check where the + 1 comes from.
per_device_batch_size = int(global_batch_size / N_GPUS) + 1
# Only create and iterate over tf input pipeline in one Python process to
# avoid creating too many threads.
if RANK == 0:
data_rng = data_rng.astype('uint32')
dataset_iter = super()._build_input_queue(data_rng,
split,
data_dir,
global_batch_size)
while True:
if RANK == 0:
batch = next(dataset_iter) # pylint: disable=stop-iteration-return
graph = _graph_map(_pytorch_map, batch['inputs'])
targets = torch.as_tensor(batch['targets'], device=DEVICE)
weights = torch.as_tensor(
batch['weights'], dtype=torch.bool, device=DEVICE)
# Send batch to other devices when using DDP.
if USE_PYTORCH_DDP:
dist.broadcast_object_list([graph], src=0, device=DEVICE)
# During eval, the batch size of the remainder might be different.
if split != 'train':
per_device_batch_size = torch.tensor(
len(targets[0]), dtype=torch.int32, device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
dist.broadcast(targets, src=0)
targets = targets[0]
dist.broadcast(weights, src=0)
weights = weights[0]
else:
targets = targets.view(-1, targets.shape[-1])
weights = weights.view(-1, weights.shape[-1])
else:
graph = [None]
dist.broadcast_object_list(graph, src=0, device=DEVICE)
graph = graph[0]
# During eval, the batch size of the remainder might be different.
if split != 'train':
per_device_batch_size = torch.empty((1,),
dtype=torch.int32,
device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
targets = torch.empty(
(N_GPUS, per_device_batch_size, self._num_outputs), device=DEVICE)
dist.broadcast(targets, src=0)
targets = targets[RANK]
weights = torch.empty(
(N_GPUS, per_device_batch_size, self._num_outputs),
dtype=torch.bool,
device=DEVICE)
dist.broadcast(weights, src=0)
weights = weights[RANK]
batch = {
'inputs': _graph_map(_shard, graph),
'targets': targets,
'weights': weights,
}
yield batch
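# Summary of the DDP data path above: only rank 0 iterates the TF input
# pipeline; it broadcasts the unsharded graph object and the target/weight
# tensors to the other ranks, and every rank then selects its own shard via
# _shard (i.e. indexing with RANK).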
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""aux_dropout_rate is unused."""
del aux_dropout_rate
torch.random.manual_seed(rng[0])
model = GNN(num_outputs=self._num_outputs, dropout_rate=dropout_rate)
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['decoder.weight', 'decoder.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
"""Get predicted logits from the network for input graphs."""
del rng
del update_batch_norm # No BN in the GNN model.
if model_state is not None:
raise ValueError(
f'Expected model_state to be None, received {model_state}.')
model = params
if mode == spec.ForwardPassMode.TRAIN:
model.train()
elif mode == spec.ForwardPassMode.EVAL:
model.eval()
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits = model(augmented_and_preprocessed_input_batch['inputs'])
return logits, None
def _binary_cross_entropy_with_mask(
self,
labels: torch.Tensor,
logits: torch.Tensor,
mask: torch.Tensor,
label_smoothing: float = 0.0) -> torch.Tensor:
"""Binary cross entropy loss for logits, with masked elements."""
if not (logits.shape == labels.shape == mask.shape): # pylint: disable=superfluous-parens
raise ValueError(
f'Shape mismatch between logits ({logits.shape}), targets '
f'({labels.shape}), and weights ({mask.shape}).')
if len(logits.shape) != 2:
raise ValueError(f'Rank of logits ({logits.shape}) must be 2.')
# To prevent propagation of NaNs during grad().
# We mask over the loss for invalid targets later.
labels = torch.where(mask.to(torch.bool), labels, -1)
# Apply label_smoothing.
num_classes = labels.shape[-1]
smoothed_labels = ((1.0 - label_smoothing) * labels +
label_smoothing / num_classes)
# Numerically stable implementation of BCE loss.
# This mimics TensorFlow's tf.nn.sigmoid_cross_entropy_with_logits().
positive_logits = logits >= 0
relu_logits = torch.where(positive_logits, logits, 0)
abs_logits = torch.where(positive_logits, logits, -logits)
losses = relu_logits - (logits * smoothed_labels) + (
torch.log(1 + torch.exp(-abs_logits)))
return torch.where(mask.to(torch.bool), losses, 0.)
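# The losses above use the standard numerically stable rewrite of sigmoid
# cross-entropy, max(x, 0) - x * z + log(1 + exp(-|x|)) with logits x and
# (smoothed) labels z, which avoids overflowing exp() for large |x|.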
def _eval_metric(self, labels, logits, masks):
loss = self.loss_fn(labels, logits, masks)
return metrics.EvalMetrics.single_from_model_output(
loss=loss['per_example'].cpu().numpy(),
logits=logits.cpu().numpy(),
labels=labels.cpu().numpy(),
mask=masks.cpu().numpy())
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
del num_examples
return {k: float(v) for k, v in total_metrics.compute().items()}
|
# Forked from the init2winit implementation here
# https://github.com/google/init2winit/blob/master/init2winit/model_lib/gnn.py.
from typing import Optional, Tuple
from flax import linen as nn
import jax.numpy as jnp
import jraph
def _make_embed(latent_dim, name):
def make_fn(inputs):
return nn.Dense(features=latent_dim, name=name)(inputs)
return make_fn
def _make_mlp(hidden_dims, dropout):
"""Creates a MLP with specified dimensions."""
@jraph.concatenated_args
def make_fn(inputs):
x = inputs
for dim in hidden_dims:
x = nn.Dense(features=dim)(x)
x = nn.LayerNorm()(x)
x = nn.relu(x)
x = dropout(x)
return x
return make_fn
class GNN(nn.Module):
"""Defines a graph network.
The model assumes the input data is a jraph.GraphsTuple without global
variables. The final prediction will be encoded in the globals.
"""
num_outputs: int
latent_dim: int = 256
hidden_dims: Tuple[int] = (256,)
# If None, defaults to 0.1.
dropout_rate: Optional[float] = 0.1
num_message_passing_steps: int = 5
@nn.compact
def __call__(self, graph, train):
if self.dropout_rate is None:
dropout_rate = 0.1
else:
dropout_rate = self.dropout_rate
dropout = nn.Dropout(rate=dropout_rate, deterministic=not train)
graph = graph._replace(
globals=jnp.zeros([graph.n_node.shape[0], self.num_outputs]))
embedder = jraph.GraphMapFeatures(
embed_node_fn=_make_embed(self.latent_dim, name='node_embedding'),
embed_edge_fn=_make_embed(self.latent_dim, name='edge_embedding'))
graph = embedder(graph)
for _ in range(self.num_message_passing_steps):
net = jraph.GraphNetwork(
update_edge_fn=_make_mlp(self.hidden_dims, dropout=dropout),
update_node_fn=_make_mlp(self.hidden_dims, dropout=dropout),
update_global_fn=_make_mlp(self.hidden_dims, dropout=dropout))
graph = net(graph)
# Map globals to represent the final result
decoder = jraph.GraphMapFeatures(embed_global_fn=nn.Dense(self.num_outputs))
graph = decoder(graph)
return graph.globals
|
"""OGBG workload implemented in Jax."""
import functools
from typing import Any, Dict, Optional, Tuple
from flax import jax_utils
import jax
import jax.numpy as jnp
import jraph
import optax
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.ogbg import metrics
from algorithmic_efficiency.workloads.ogbg.ogbg_jax import models
from algorithmic_efficiency.workloads.ogbg.workload import BaseOgbgWorkload
class OgbgWorkload(BaseOgbgWorkload):
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""aux_dropout_rate is unused."""
del aux_dropout_rate
rng, params_rng, dropout_rng = jax.random.split(rng, 3)
self._model = models.GNN(self._num_outputs, dropout_rate=dropout_rate)
init_fn = jax.jit(functools.partial(self._model.init, train=False))
fake_batch = jraph.GraphsTuple(
n_node=jnp.asarray([1]),
n_edge=jnp.asarray([1]),
nodes=jnp.ones((1, 9)),
edges=jnp.ones((1, 3)),
globals=jnp.zeros((1, self._num_outputs)),
senders=jnp.asarray([0]),
receivers=jnp.asarray([0]))
params = init_fn({'params': params_rng, 'dropout': dropout_rng}, fake_batch)
params = params['params']
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
return jax_utils.replicate(params), None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_17'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
"""Get predicted logits from the network for input graphs."""
del update_batch_norm # No BN in the GNN model.
if model_state is not None:
raise ValueError(
f'Expected model_state to be None, received {model_state}.')
train = mode == spec.ForwardPassMode.TRAIN
logits = self._model.apply({'params': params},
augmented_and_preprocessed_input_batch['inputs'],
rngs={'dropout': rng},
train=train)
return logits, None
def _binary_cross_entropy_with_mask(
self,
labels: jnp.ndarray,
logits: jnp.ndarray,
mask: jnp.ndarray,
label_smoothing: float = 0.0) -> jnp.ndarray:
"""Binary cross entropy loss for logits, with masked elements."""
if not (logits.shape == labels.shape == mask.shape): # pylint: disable=superfluous-parens
raise ValueError(
f'Shape mismatch between logits ({logits.shape}), targets '
f'({labels.shape}), and weights ({mask.shape}).')
if len(logits.shape) != 2:
raise ValueError(f'Rank of logits ({logits.shape}) must be 2.')
# To prevent propagation of NaNs during grad().
# We mask over the loss for invalid targets later.
labels = jnp.where(mask, labels, -1)
# Apply label smoothing.
smoothed_labels = optax.smooth_labels(labels, label_smoothing)
# Numerically stable implementation of BCE loss.
# This mimics TensorFlow's tf.nn.sigmoid_cross_entropy_with_logits().
positive_logits = logits >= 0
relu_logits = jnp.where(positive_logits, logits, 0)
abs_logits = jnp.where(positive_logits, logits, -logits)
losses = relu_logits - (logits * smoothed_labels) + (
jnp.log(1 + jnp.exp(-abs_logits)))
return jnp.where(mask, losses, 0.)
def _eval_metric(self, labels, logits, masks):
loss = self.loss_fn(labels, logits, masks)
return metrics.EvalMetrics.single_from_model_output(
loss=loss['per_example'], logits=logits, labels=labels, mask=masks)
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0, 0, None),
static_broadcasted_argnums=(0,))
def _eval_batch(self, params, batch, model_state, rng):
return super()._eval_batch(params, batch, model_state, rng)
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
del num_examples
total_metrics = total_metrics.reduce()
return {k: float(v) for k, v in total_metrics.compute().items()}
|
"""ImageNet workload parent class."""
import math
from typing import Dict, Iterator, Optional, Tuple
from algorithmic_efficiency import spec
class BaseImagenetResNetWorkload(spec.Workload):
_num_classes: int = 1000
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'accuracy'
def has_reached_validation_target(self, eval_result: Dict[str,
float]) -> bool:
return eval_result['validation/accuracy'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 1 - 0.22569 # 0.77431
def has_reached_test_target(self, eval_result: Dict[str, float]) -> bool:
return eval_result['test/accuracy'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 1 - 0.3440 # 0.6560
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
return 1_281_167
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
return 50_000
@property
def num_test_examples(self) -> int:
return 10_000 # ImageNet-v2.
@property
def eval_batch_size(self) -> int:
return 1024
@property
def train_mean(self) -> Tuple[float, float, float]:
return (0.485 * 255, 0.456 * 255, 0.406 * 255)
@property
def train_stddev(self) -> Tuple[float, float, float]:
return (0.229 * 255, 0.224 * 255, 0.225 * 255)
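# These are the standard ImageNet per-channel mean/std statistics, scaled by 255
# presumably because the input pipeline normalizes raw [0, 255] pixel values
# rather than floats in [0, 1].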
# Data augmentation settings.
@property
def scale_ratio_range(self) -> Tuple[float, float]:
return (0.08, 1.0)
@property
def aspect_ratio_range(self) -> Tuple[float, float]:
return (0.75, 4.0 / 3.0)
@property
def center_crop_size(self) -> int:
return 224
@property
def resize_size(self) -> int:
return 256
@property
def max_allowed_runtime_sec(self) -> int:
return 63_008 # ~17.5 hours
@property
def eval_period_time_sec(self) -> int:
return 510 # 8.5 minutes.
def _build_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
use_mixup: bool = False,
use_randaug: bool = False) -> Iterator[Dict[str, spec.Tensor]]:
raise NotImplementedError
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del num_batches
if split == 'test':
if not cache:
raise ValueError('cache must be True for split=test.')
if not repeat_final_dataset:
raise ValueError('repeat_final_dataset must be True for split=test.')
return self._build_dataset(data_rng,
split,
data_dir,
global_batch_size,
cache,
repeat_final_dataset)
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 186_666
|